| code (string, 87–55.2k chars) | code_codestyle (int64, 0–349) | style_context (string, 135–49.1k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # gradient of the normal to the ellipse 4x^2 + y^2 = 100 at (point_x, point_y)
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }") | 262 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: Any ) -> str:
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case , )
assert hasattr(self , """env""" )
def lowerCAmelCase_ ( self: int , snake_case: Dict ) -> List[Any]:
# configuration for running training on smdistributed Model Parallel
snake_case_ :Tuple = {
"""enabled""": True,
"""processes_per_host""": 8,
}
snake_case_ :List[Any] = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
snake_case_ :Tuple = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
snake_case_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case , py_version="""py36""" , )
def lowerCAmelCase_ ( self: Any , snake_case: Tuple ) -> List[str]:
TrainingJobAnalytics(snake_case ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowerCAmelCase_ ( self: Dict , snake_case: Dict ) -> List[Any]:
# create estimator
snake_case_ :List[Any] = self.create_estimator(snake_case )
# run training
estimator.fit()
# result dataframe
snake_case_ :Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
snake_case_ :Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
snake_case_ :Dict = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
snake_case_ :int = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case )
| 66 | 0 |
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """simple docstring"""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """simple docstring"""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    """simple docstring"""
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main() | 312 |
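A short, hypothetical driver for the two helpers above; the sample string is arbitrary.

# Hypothetical usage of analyze_text()/calculate_prob() as defined above.
sample = "the quick brown fox jumps over the lazy dog"
single_chars, char_pairs = analyze_text(sample)
print(single_chars.most_common(3))  # most frequent single characters
calculate_prob(sample)  # prints H(single), H(pair) and their difference, in bits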
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Dict , snake_case: Optional[Any] , snake_case: Tuple=13 , snake_case: Any=32 , snake_case: Union[str, Any]=2 , snake_case: Tuple=3 , snake_case: Union[str, Any]=16 , snake_case: Union[str, Any]=[1, 2, 1] , snake_case: Optional[Any]=[2, 2, 4] , snake_case: str=2 , snake_case: List[str]=2.0 , snake_case: Optional[int]=True , snake_case: Union[str, Any]=0.0 , snake_case: Optional[int]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[str]="gelu" , snake_case: Any=False , snake_case: Optional[Any]=True , snake_case: Optional[int]=0.0_2 , snake_case: Any=1E-5 , snake_case: Optional[int]=True , snake_case: int=None , snake_case: Any=True , snake_case: str=10 , snake_case: Optional[Any]=8 , snake_case: Union[str, Any]=["stage1", "stage2", "stage3"] , snake_case: Tuple=[1, 2, 3] , ) -> Dict:
snake_case_ :Dict = parent
snake_case_ :List[Any] = batch_size
snake_case_ :Dict = image_size
snake_case_ :Dict = patch_size
snake_case_ :Tuple = num_channels
snake_case_ :List[Any] = embed_dim
snake_case_ :List[str] = depths
snake_case_ :str = num_heads
snake_case_ :Tuple = window_size
snake_case_ :Tuple = mlp_ratio
snake_case_ :int = qkv_bias
snake_case_ :Tuple = hidden_dropout_prob
snake_case_ :Optional[Any] = attention_probs_dropout_prob
snake_case_ :Dict = drop_path_rate
snake_case_ :Any = hidden_act
snake_case_ :Any = use_absolute_embeddings
snake_case_ :int = patch_norm
snake_case_ :List[Any] = layer_norm_eps
snake_case_ :Tuple = initializer_range
snake_case_ :str = is_training
snake_case_ :int = scope
snake_case_ :Tuple = use_labels
snake_case_ :Tuple = type_sequence_label_size
snake_case_ :str = encoder_stride
snake_case_ :List[Any] = out_features
snake_case_ :str = out_indices
def lowerCAmelCase_ ( self: Tuple ) -> Dict:
snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ :str = None
if self.use_labels:
snake_case_ :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ :Union[str, Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self: int ) -> Optional[Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowerCAmelCase_ ( self: List[Any] , snake_case: str , snake_case: int , snake_case: List[str] ) -> Any:
snake_case_ :Dict = MaskFormerSwinModel(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Tuple = model(snake_case )
snake_case_ :Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case_ :Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase_ ( self: Optional[Any] , snake_case: int , snake_case: List[str] , snake_case: Tuple ) -> Union[str, Any]:
snake_case_ :Any = MaskFormerSwinBackbone(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Optional[Any] = model(snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(snake_case ):
snake_case_ :Optional[Any] = ["""stem"""]
snake_case_ :str = MaskFormerSwinBackbone(config=snake_case )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]:
snake_case_ :Optional[int] = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_ :str = config_and_inputs
snake_case_ :Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : Union[str, Any] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_A : str = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
_A : List[str] = False
_A : Any = False
_A : Dict = False
_A : List[Any] = False
_A : Optional[int] = False
def lowerCAmelCase_ ( self: Dict ) -> Any:
snake_case_ :str = MaskFormerSwinModelTester(self )
snake_case_ :Optional[Any] = ConfigTester(self , config_class=snake_case , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]:
pass
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self: Any ) -> Tuple:
return
def lowerCAmelCase_ ( self: Any ) -> Any:
snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> int:
snake_case_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case )
@unittest.skip("""Swin does not use inputs_embeds""" )
def lowerCAmelCase_ ( self: str ) -> List[str]:
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
pass
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :str = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ :Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def lowerCAmelCase_ ( self: Tuple ) -> Dict:
snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :Optional[int] = model_class(snake_case )
snake_case_ :str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ :str = [*signature.parameters.keys()]
snake_case_ :str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]:
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def lowerCAmelCase_ ( self: Dict ) -> List[Any]:
pass
def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Any , snake_case: List[str] ) -> str:
snake_case_ :List[str] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :List[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :Any = outputs.hidden_states
snake_case_ :Optional[int] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case ) , snake_case )
# Swin has a different seq_length
snake_case_ :str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ :int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]:
snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case_ :Tuple = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :List[Any] = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple:
snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :List[Any] = 3
snake_case_ :List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case_ :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case_ :List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case_ :str = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :Any = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowerCAmelCase_ ( self: List[str] ) -> str:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowerCAmelCase_ ( self: str ) -> List[Any]:
pass
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
snake_case_, snake_case_ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(snake_case: str ):
snake_case_ :Optional[int] = 0
return t
def check_equivalence(snake_case: List[Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Tuple={} ):
with torch.no_grad():
snake_case_ :List[Any] = model(**snake_case , return_dict=snake_case , **snake_case )
snake_case_ :Any = model(**snake_case , return_dict=snake_case , **snake_case ).to_tuple()
def recursive_check(snake_case: List[Any] , snake_case: int ):
if isinstance(snake_case , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(snake_case , snake_case ):
recursive_check(snake_case , snake_case )
elif isinstance(snake_case , snake_case ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(snake_case , snake_case )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(snake_case ) , set_nan_tensor_to_zero(snake_case ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}. Dict has"""
f""" `nan`: {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}."""
) , )
recursive_check(snake_case , snake_case )
for model_class in self.all_model_classes:
snake_case_ :int = model_class(snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Any = self._prepare_for_class(snake_case , snake_case )
snake_case_ :List[Any] = self._prepare_for_class(snake_case , snake_case )
check_equivalence(snake_case , snake_case , snake_case )
snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
check_equivalence(snake_case , snake_case , snake_case )
snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case )
snake_case_ :Any = self._prepare_for_class(snake_case , snake_case )
check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} )
snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
snake_case_ :List[str] = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} )
@require_torch
class lowerCamelCase ( unittest.TestCase , _lowerCAmelCase ):
'''simple docstring'''
_A : int = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_A : Tuple = MaskFormerSwinConfig
def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]:
snake_case_ :Optional[Any] = MaskFormerSwinModelTester(self )
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Tuple = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
snake_case_ :List[str] = backbone_class(snake_case )
backbone.to(snake_case )
backbone.eval()
snake_case_ :List[Any] = backbone(**snake_case )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , snake_case )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
snake_case_ :Union[str, Any] = backbone(**snake_case , output_hidden_states=snake_case )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
snake_case_, snake_case_, snake_case_ :List[Any] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
snake_case_ :List[Any] = backbone(**snake_case , output_attentions=snake_case )
self.assertIsNotNone(outputs.attentions )
| 66 | 0 |
from math import factorial


def combinations(n: int, k: int) -> int:
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    ) | 212 |
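The values quoted in the prints above can be checked directly; a minimal sanity test for combinations():

# Sanity checks for combinations() above, using the figures from the prints.
assert combinations(52, 5) == 2_598_960  # five-card hands from a 52-card deck
assert combinations(40, 4) == 91_390     # groups of 4 from a class of 40
assert combinations(10, 3) == 120        # three teams chosen from ten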
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__a = logging.get_logger(__name__)
enable_full_determinism()
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = UNetaDModel
_A : Union[str, Any] = """sample"""
@property
def lowerCAmelCase_ ( self: str ) -> Tuple:
snake_case_ :List[str] = 4
snake_case_ :Tuple = 3
snake_case_ :Optional[Any] = (32, 32)
snake_case_ :str = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :Union[str, Any] = torch.tensor([10] ).to(snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self: List[str] ) -> Dict:
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
return (3, 32, 32)
def lowerCAmelCase_ ( self: Optional[int] ) -> Dict:
snake_case_ :Any = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
snake_case_ :Tuple = self.dummy_input
return init_dict, inputs_dict
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = UNetaDModel
_A : Union[str, Any] = """sample"""
@property
def lowerCAmelCase_ ( self: str ) -> str:
snake_case_ :List[str] = 4
snake_case_ :Optional[int] = 4
snake_case_ :int = (32, 32)
snake_case_ :Any = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :List[Any] = torch.tensor([10] ).to(snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
return (4, 32, 32)
@property
def lowerCAmelCase_ ( self: List[Any] ) -> int:
return (4, 32, 32)
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]:
snake_case_ :Dict = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
snake_case_ :List[str] = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]:
snake_case_, snake_case_ :List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(snake_case )
snake_case_ :List[str] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self: Tuple ) -> Dict:
snake_case_, snake_case_ :Union[str, Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case )
model.to(snake_case )
snake_case_ :Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self: str ) -> Any:
# by default model loading will use accelerate as `low_cpu_mem_usage=True`
snake_case_, snake_case_ :List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case )
model_accelerate.to(snake_case )
model_accelerate.eval()
snake_case_ :List[Any] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ :int = noise.to(snake_case )
snake_case_ :str = torch.tensor([10] * noise.shape[0] ).to(snake_case )
snake_case_ :Optional[int] = model_accelerate(snake_case , snake_case )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
snake_case_, snake_case_ :str = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case , low_cpu_mem_usage=snake_case )
model_normal_load.to(snake_case )
model_normal_load.eval()
snake_case_ :int = model_normal_load(snake_case , snake_case )["""sample"""]
assert torch_all_close(snake_case , snake_case , rtol=1E-3 )
def lowerCAmelCase_ ( self: Tuple ) -> Any:
snake_case_ :Tuple = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(snake_case )
snake_case_ :Optional[int] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ :int = noise.to(snake_case )
snake_case_ :List[Any] = torch.tensor([10] * noise.shape[0] ).to(snake_case )
with torch.no_grad():
snake_case_ :Union[str, Any] = model(snake_case , snake_case ).sample
snake_case_ :Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case_ :Dict = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-3 ) )
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = UNetaDModel
_A : List[Any] = """sample"""
@property
def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: int=(32, 32) ) -> Tuple:
snake_case_ :Union[str, Any] = 4
snake_case_ :Any = 3
snake_case_ :int = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any:
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self: int ) -> Tuple:
return (3, 32, 32)
def lowerCAmelCase_ ( self: List[str] ) -> Tuple:
snake_case_ :List[Any] = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1E-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
snake_case_ :int = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]:
snake_case_, snake_case_ :List[Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(snake_case )
snake_case_ :Any = self.dummy_input
snake_case_ :int = floats_tensor((4, 3) + (256, 256) ).to(snake_case )
snake_case_ :int = noise
snake_case_ :int = model(**snake_case )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase_ ( self: str ) -> Dict:
snake_case_ :Dict = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(snake_case )
snake_case_ :List[str] = 4
snake_case_ :Optional[int] = 3
snake_case_ :List[str] = (256, 256)
snake_case_ :Tuple = torch.ones((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :str = torch.tensor(batch_size * [1E-4] ).to(snake_case )
with torch.no_grad():
snake_case_ :Dict = model(snake_case , snake_case ).sample
snake_case_ :int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ :Optional[Any] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) )
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
snake_case_ :Optional[Any] = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(snake_case )
snake_case_ :Optional[int] = 4
snake_case_ :Optional[Any] = 3
snake_case_ :Optional[Any] = (32, 32)
snake_case_ :Dict = torch.ones((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :Any = torch.tensor(batch_size * [1E-4] ).to(snake_case )
with torch.no_grad():
snake_case_ :str = model(snake_case , snake_case ).sample
snake_case_ :int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ :int = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) )
def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]:
# not required for this model
pass
| 66 | 0 |
def solution(n: int = 100) -> int:
    '''simple docstring'''
    collect_powers = set()
    current_pow = 0

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
| 12 |
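Project Euler problem 29 states there are 15 distinct terms of a**b for 2 <= a, b <= 5, which gives a handy spot-check for solution() above:

# Spot-check against the worked example in Project Euler problem 29.
assert solution(5) == 15  # distinct values of a**b for 2 <= a, b <= 5
print(solution())         # distinct terms for the default limit of 100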
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__a = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Optional[int] = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class a_ ( _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = """switch_transformers"""
__SCREAMING_SNAKE_CASE : Tuple = ["""past_key_values"""]
__SCREAMING_SNAKE_CASE : Tuple = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , _lowerCamelCase=3_2128 , _lowerCamelCase=768 , _lowerCamelCase=64 , _lowerCamelCase=2048 , _lowerCamelCase=64 , _lowerCamelCase=12 , _lowerCamelCase=3 , _lowerCamelCase=12 , _lowerCamelCase=3 , _lowerCamelCase=12 , _lowerCamelCase=8 , _lowerCamelCase=False , _lowerCamelCase=0.0_1 , _lowerCamelCase="float32" , _lowerCamelCase=False , _lowerCamelCase=32 , _lowerCamelCase=128 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-6 , _lowerCamelCase=0.0_0_1 , _lowerCamelCase=0.0_0_1 , _lowerCamelCase=1.0 , _lowerCamelCase="relu" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0 , _lowerCamelCase=1 , **_lowerCamelCase , ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = vocab_size
SCREAMING_SNAKE_CASE : List[str] = d_model
SCREAMING_SNAKE_CASE : Optional[Any] = d_kv
SCREAMING_SNAKE_CASE : Union[str, Any] = d_ff
SCREAMING_SNAKE_CASE : Union[str, Any] = num_sparse_encoder_layers
SCREAMING_SNAKE_CASE : Any = num_layers
SCREAMING_SNAKE_CASE : Optional[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
SCREAMING_SNAKE_CASE : List[Any] = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
SCREAMING_SNAKE_CASE : Optional[int] = self.num_layers // self.num_sparse_encoder_layers
else:
SCREAMING_SNAKE_CASE : int = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
SCREAMING_SNAKE_CASE : Tuple = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
SCREAMING_SNAKE_CASE : Any = self.num_decoder_layers # HACK: this will create 0 sparse layers
SCREAMING_SNAKE_CASE : int = num_heads
SCREAMING_SNAKE_CASE : Any = num_experts
SCREAMING_SNAKE_CASE : Optional[Any] = expert_capacity
SCREAMING_SNAKE_CASE : int = router_bias
SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
SCREAMING_SNAKE_CASE : str = router_dtype
SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens
SCREAMING_SNAKE_CASE : str = relative_attention_num_buckets
SCREAMING_SNAKE_CASE : Tuple = relative_attention_max_distance
SCREAMING_SNAKE_CASE : List[str] = dropout_rate
SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Dict = initializer_factor
SCREAMING_SNAKE_CASE : Tuple = feed_forward_proj
SCREAMING_SNAKE_CASE : List[str] = use_cache
SCREAMING_SNAKE_CASE : List[str] = add_router_probs
SCREAMING_SNAKE_CASE : Optional[Any] = router_z_loss_coef
SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef
SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
SCREAMING_SNAKE_CASE : int = act_info[-1]
SCREAMING_SNAKE_CASE : Union[str, Any] = act_info[0] == """gated"""
if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
SCREAMING_SNAKE_CASE : Tuple = """gelu_new"""
super().__init__(
pad_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase , )
| 313 |
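The sparse-step bookkeeping in __init__ above is easiest to see with concrete numbers. A hypothetical instantiation follows; the public class is SwitchTransformersConfig in transformers, and treating these constructor arguments as accepted kwargs is an assumption based on the assignments above.

# Hypothetical sketch: 12 layers with 3 sparse encoder layers gives a sparse step of 4,
# i.e. every 4th encoder layer becomes a mixture-of-experts layer.
from transformers import SwitchTransformersConfig  # assumed public name of the class above

config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
print(config.num_layers // config.num_sparse_encoder_layers)  # 4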
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : str = StableDiffusionSAGPipeline
_A : Optional[Any] = TEXT_TO_IMAGE_PARAMS
_A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
_A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
_A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
_A : List[str] = False
def lowerCAmelCase_ ( self: Optional[Any] ) -> str:
torch.manual_seed(0 )
snake_case_ :Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
snake_case_ :Any = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=snake_case , set_alpha_to_one=snake_case , )
torch.manual_seed(0 )
snake_case_ :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
snake_case_ :Tuple = CLIPTextModel(snake_case )
snake_case_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ :Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase_ ( self: List[str] , snake_case: Tuple , snake_case: List[str]=0 ) -> str:
if str(snake_case ).startswith("""mps""" ):
snake_case_ :Tuple = torch.manual_seed(snake_case )
else:
snake_case_ :Optional[int] = torch.Generator(device=snake_case ).manual_seed(snake_case )
snake_case_ :Any = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self: Optional[int] ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: int ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self: int ) -> List[str]:
snake_case_ :Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
snake_case_ :int = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
snake_case_ :Union[str, Any] = """."""
snake_case_ :str = torch.manual_seed(0 )
snake_case_ :str = sag_pipe(
[prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
snake_case_ :List[Any] = output.images
snake_case_ :Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ :List[Any] = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCAmelCase_ ( self: Dict ) -> str:
snake_case_ :Tuple = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
snake_case_ :Optional[int] = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
snake_case_ :Tuple = """."""
snake_case_ :Union[str, Any] = torch.manual_seed(0 )
snake_case_ :Tuple = sag_pipe(
[prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
snake_case_ :Optional[int] = output.images
snake_case_ :Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ :Tuple = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCAmelCase_ ( self: List[str] ) -> List[str]:
snake_case_ :Optional[int] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
snake_case_ :int = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
snake_case_ :Tuple = """."""
snake_case_ :Optional[int] = torch.manual_seed(0 )
snake_case_ :List[str] = sag_pipe(
[prompt] , width=768 , height=512 , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
snake_case_ :Optional[Any] = output.images
assert image.shape == (1, 512, 768, 3)
| 66 | 0 |
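The slow tests above exercise StableDiffusionSAGPipeline end to end; a minimal usage sketch mirroring them (assumes a CUDA device and a model download):

# Minimal SAG sketch based on the slow tests above.
import torch
from diffusers import StableDiffusionSAGPipeline

sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
generator = torch.manual_seed(0)
output = sag_pipe(".", generator=generator, guidance_scale=7.5, sag_scale=1.0,
                  num_inference_steps=20, output_type="np")
print(output.images[0].shape)  # (512, 512, 3)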
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
| 287 |
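A few sample inputs for the validator above; the expected results follow from the regex.

# Sample inputs for is_sri_lankan_phone_number() as defined above.
for number in ("0094702343221", "+94773283048", "0718382399", "075903"):
    print(number, is_sri_lankan_phone_number(number))
# The first three combine an accepted prefix, a 7x operator code and seven trailing
# digits; the last is too short to match \d{7}$.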
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    '''simple docstring'''

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    '''simple docstring'''
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 66 | 0 |
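A hypothetical run of the random walk above on a three-node chain whose outgoing probabilities each sum to 1:

# Hypothetical three-node chain for get_transitions() as defined above.
transitions = [
    ("a", "a", 0.9), ("a", "b", 0.075), ("a", "c", 0.025),
    ("b", "a", 0.15), ("b", "b", 0.8), ("b", "c", 0.05),
    ("c", "a", 0.25), ("c", "b", 0.25), ("c", "c", 0.5),
]
print(get_transitions("a", transitions, 5000))  # visit counts per node after 5000 steps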
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 244 |
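The module above registers a _LazyModule so that heavy torch imports only happen on first attribute access. A simplified sketch of the idea follows; it is not transformers' actual implementation.

# Simplified lazy-module sketch (assumed structure, not the real _LazyModule).
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # Import the owning submodule only when the attribute is first requested.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")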
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__a = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__a = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__a = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__a = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
__a = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__a = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier: str) -> list[str]:
    '''simple docstring'''
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
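# Hypothetical REPL check of camel_case_split above:
#   >>> camel_case_split("TFRobertaForSequenceClassification")
#   ['TF', 'Roberta', 'For', 'Sequence', 'Classification']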
def get_frameworks_table():
'''simple docstring'''
snake_case_ :int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
snake_case_ :Dict = {
config.replace("""Config""", """""" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
snake_case_ :Optional[Any] = collections.defaultdict(_lowercase )
snake_case_ :int = collections.defaultdict(_lowercase )
snake_case_ :List[str] = collections.defaultdict(_lowercase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(_lowercase ):
snake_case_ :int = None
if _re_tf_models.match(_lowercase ) is not None:
snake_case_ :int = tf_models
snake_case_ :List[str] = _re_tf_models.match(_lowercase ).groups()[0]
elif _re_flax_models.match(_lowercase ) is not None:
snake_case_ :List[Any] = flax_models
snake_case_ :Any = _re_flax_models.match(_lowercase ).groups()[0]
elif _re_pt_models.match(_lowercase ) is not None:
snake_case_ :Optional[Any] = pt_models
snake_case_ :int = _re_pt_models.match(_lowercase ).groups()[0]
if lookup_dict is not None:
while len(_lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
snake_case_ :Optional[int] = True
break
# Try again after removing the last word in the name
snake_case_ :Optional[Any] = """""".join(camel_case_split(_lowercase )[:-1] )
snake_case_ :Optional[int] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
snake_case_ :Optional[Any] = list(_lowercase )
all_models.sort()
snake_case_ :Optional[int] = {"""model_type""": all_models}
snake_case_ :Optional[int] = [pt_models[t] for t in all_models]
snake_case_ :Any = [tf_models[t] for t in all_models]
snake_case_ :Dict = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
snake_case_ :Dict = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
snake_case_ :Optional[Any] = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
snake_case_ :Tuple = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
snake_case_ :Tuple = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
snake_case_ :str = """AutoTokenizer"""
snake_case_ :int = [processors[t] for t in all_models]
return pd.DataFrame(_lowercase )
def update_pipeline_and_auto_class_table(table):
'''simple docstring'''
snake_case_ :List[Any] = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
snake_case_ :Optional[int] = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
snake_case_ :List[str] = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(_lowercase, _lowercase, _lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(_lowercase, _lowercase ):
continue
# First extract all model_names
snake_case_ :Tuple = []
for name in getattr(_lowercase, _lowercase ).values():
if isinstance(_lowercase, _lowercase ):
model_names.append(_lowercase )
else:
model_names.extend(list(_lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def update_metadata(token, commit_sha):
'''simple docstring'''
snake_case_ :List[Any] = get_frameworks_table()
snake_case_ :str = Dataset.from_pandas(_lowercase )
snake_case_ :List[Any] = hf_hub_download(
"""huggingface/transformers-metadata""", """pipeline_tags.json""", repo_type="""dataset""", token=_lowercase )
snake_case_ :List[str] = Dataset.from_json(_lowercase )
snake_case_ :int = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(_lowercase ) )
}
snake_case_ :Optional[int] = update_pipeline_and_auto_class_table(_lowercase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
snake_case_ :Tuple = sorted(table.keys() )
snake_case_ :Tuple = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
snake_case_ :Union[str, Any] = Dataset.from_pandas(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(_lowercase, """frameworks.json""" ) )
tags_dataset.to_json(os.path.join(_lowercase, """pipeline_tags.json""" ) )
if commit_sha is not None:
snake_case_ :Union[str, Any] = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
snake_case_ :List[Any] = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""", folder_path=_lowercase, repo_type="""dataset""", token=_lowercase, commit_message=_lowercase, )
def check_pipeline_tags():
    """Check all pipeline tags are properly defined in the PIPELINE_TAGS_AND_AUTO_MODELS constant of this script."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)
    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
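# Illustrative invocations (run from the repository root):
#   python utils/update_metadata.py --token <hf_write_token> --commit_sha <sha>   # push refreshed metadata
#   python utils/update_metadata.py --check-only                                  # only validate pipeline tags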
| 66 | 0 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
lowerCamelCase_ : Any = """examples/"""
lowerCamelCase_ : Optional[Any] = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
lowerCamelCase_ : Any = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
lowerCamelCase_ : Optional[Any] = """README.md"""
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
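# Illustrative release flow for these helpers (version numbers are examples):
#   python utils/release.py                 # pre-release: 4.28.0.dev0 -> 4.28.0 everywhere
#   python utils/release.py --patch         # patch release: 4.28.0 -> 4.28.1 (examples untouched)
#   python utils/release.py --post_release  # back to dev: 4.28.0 -> 4.29.0.dev0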
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work() | 81 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """A training module for NER. See BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.token_classification_task = token_classification_task
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )

    def validation_step(self, batch, batch_nb):
        "Compute validation"
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
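# Example training command (illustrative; the data paths and the `tasks` module
# providing the NER task class are assumptions, not part of this file):
#   python run_ner.py --data_dir ./data --labels ./data/labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./out \
#       --max_seq_length 128 --task_type NER --do_train --do_predict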
| 66 | 0 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products of primes over all partitions of n into primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret
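# Worked example (verified by hand, not from the original file): the prime
# partitions of 5 are {5} and {2, 3}, so partition(5) returns the products
# {5, 6}; likewise partition(7) == {7, 10, 12} from 7, 5+2 and 3+2+2.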
def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the first number that can be written as a sum of primes in more than number_unique_partitions ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"{solution() = }")
| 276 |
"""simple docstring"""
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
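# Worked check for the example above (computed by hand): f(y) = y**6, so
# f''(y) = 30 * y**4 and f''(9) = 30 * 6561 = 196830, which is what the
# print statement outputs; dual numbers give exact derivatives of polynomials.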
| 66 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
__a = 10
def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sorts a list of non-negative integers in place with LSD radix sort."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
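# Illustrative usage (not part of the original file):
#   radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) -> [2, 24, 45, 66, 75, 90, 170, 802]
# Note the implementation assumes non-negative integers, since each pass buckets
# values by the digit extracted with (i / placement) % RADIX.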
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 | 0 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of the first n naturals."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
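# Worked example for n = 10 (computed by hand): the square of the sum is
# 55**2 = 3025 and the sum of the squares is 385, so solution(10) == 2640.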
if __name__ == "__main__":
print(f'''{solution() = }''')
| 260 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 0 |
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
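# With both inputs set to 1 the XOR (sum) bit is 0 and the AND (carry) bit is 1,
# so on an ideal simulator the counts collapse to {'10': 1000}: qiskit prints
# the classical register with bit 1, the carry, on the left.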
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""") | 262 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 66 | 0 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Given an 'isbn/0140328726' style olid, return the JSON data from Open Library."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
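# Illustrative call (the output depends on the live Open Library API response):
#   summarize_book(get_openlibrary_data("isbn/0140328726"))
# should yield a dict whose "Title" and "Authors" entries describe the book
# registered under that ISBN.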
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
    isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(F"\nSearching Open Library for ISBN: {isbn}...\n")
try:
        book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print('\n'.join(F"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"Sorry, there are no results for ISBN: {isbn}.") | 312 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
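# Illustrative invocation (the script name and paths are assumptions):
#   python convert_tf_gptsan_to_pt.py --tf_model_dir ./gptsan_tf_ckpt --output ./gptsan.pt
# The saved OrderedDict can afterwards be inspected with torch.load("./gptsan.pt").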
| 66 | 0 |
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 12 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__a = pd.read_csv("sample_data.csv", header=None)
__a = df.shape[:1][0]
# If you're using some other dataset input the target column
__a = df.iloc[:, 1:2]
__a = actual_data.values.reshape(len_data, 1)
__a = MinMaxScaler().fit_transform(actual_data)
__a = 10
__a = 5
__a = 20
__a = len_data - periods * look_back
__a = actual_data[:division]
__a = actual_data[division - look_back :]
__a , __a = [], []
__a , __a = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__a = np.array(train_x)
__a = np.array(test_x)
__a = np.array([list(i.ravel()) for i in train_y])
__a = np.array([list(i.ravel()) for i in test_y])
__a = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
__a = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
__a = model.predict(x_test)
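    # `pred` has shape (num_test_windows, forward_days): each row is the model's
    # 5-day forecast for one look-back window. To sanity-check the fit one could,
    # for instance, compare pred[:, 0] against y_test[:, 0] (illustrative).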
| 66 | 0 |
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 313 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 0 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_lowerCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_lowerCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_lowerCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(_lowerCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_lowerCamelCase )
if "ckpt" in self._tf_checkpoint.lower():
lowerCamelCase : Tuple = self._tf_checkpoint
lowerCamelCase : List[Any] = """"""
else:
lowerCamelCase : Union[str, Any] = self._tf_checkpoint
lowerCamelCase : Union[str, Any] = """"""
convert_transfo_xl_checkpoint_to_pytorch(
__magic_name__ , self._config , self._pytorch_dump_output , __magic_name__ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_lowerCamelCase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_lowerCamelCase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
| 287 |
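A stand-alone sketch of the subcommand pattern the ConvertCommand above follows: each command registers its own sub-parser and stores a callable in `func`, which the entry point then invokes with the parsed namespace. The CLI name and flags here are illustrative, not part of the original:

from argparse import ArgumentParser

def run_convert(args):
    # Stand-in for the real conversion logic.
    print(f"would convert {args.model_type} from {args.tf_checkpoint}")

parser = ArgumentParser("demo-cli")
subparsers = parser.add_subparsers()
convert = subparsers.add_parser("convert")
convert.add_argument("--model_type", type=str, required=True)
convert.add_argument("--tf_checkpoint", type=str, required=True)
convert.set_defaults(func=run_convert)

args = parser.parse_args(["convert", "--model_type", "bert", "--tf_checkpoint", "ckpt"])
args.func(args)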
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config( model_name, num_frames ):
    '''simple docstring'''
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("""patch""" )
    patch_size = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames )
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config )
    if "large" in model_name:
        config.projection_dim = 768
    return config
def rename_key( name ):
'''simple docstring'''
if name == "token_embedding.weight":
snake_case_ :Optional[Any] = name.replace("""token_embedding.weight""", """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
snake_case_ :Tuple = name.replace("""positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
snake_case_ :Dict = name.replace("""ln_1""", """layer_norm1""" )
if "ln_2" in name:
snake_case_ :str = name.replace("""ln_2""", """layer_norm2""" )
if "c_fc" in name:
snake_case_ :str = name.replace("""c_fc""", """fc1""" )
if "c_proj" in name:
snake_case_ :int = name.replace("""c_proj""", """fc2""" )
if name.startswith("""transformer.resblocks""" ):
snake_case_ :Union[str, Any] = name.replace("""transformer.resblocks""", """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
snake_case_ :Union[str, Any] = name.replace("""attn.out_proj""", """self_attn.out_proj""" )
if "ln_final" in name:
snake_case_ :Union[str, Any] = name.replace("""ln_final""", """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
snake_case_ :Any = name.replace("""visual.class_embedding""", """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
snake_case_ :Optional[int] = name.replace("""visual.positional_embedding""", """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
snake_case_ :Union[str, Any] = name.replace("""visual.transformer.resblocks""", """vision_model.encoder.layers""" )
if "visual.conv1" in name:
snake_case_ :int = name.replace("""visual.conv1""", """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
snake_case_ :Any = name.replace("""visual.ln_pre""", """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
snake_case_ :str = name.replace("""visual.ln_post""", """vision_model.post_layernorm""" )
if "visual.proj" in name:
snake_case_ :Union[str, Any] = name.replace("""visual.proj""", """visual_projection.weight""" )
if "text_projection" in name:
snake_case_ :Dict = name.replace("""text_projection""", """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
snake_case_ :List[str] = name.replace("""prompts_visual_proj""", """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
snake_case_ :Dict = name.replace("""prompts_visual_ln""", """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
snake_case_ :str = name.replace("""positional""", """position""" )
if name.startswith("""mit.resblocks""" ):
snake_case_ :Dict = name.replace("""mit.resblocks""", """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
snake_case_ :Union[str, Any] = name.replace("""prompts_generator.norm""", """prompts_generator.layernorm""" )
return name
def convert_state_dict( orig_state_dict, config ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case_ :Dict = orig_state_dict.pop(_lowercase )
if "attn.in_proj" in key:
snake_case_ :Optional[Any] = key.split(""".""" )
if key.startswith("""visual""" ):
snake_case_ :Any = key_split[3]
snake_case_ :Optional[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case_ :str = val[
:dim, :
]
snake_case_ :Optional[int] = val[
dim : dim * 2, :
]
snake_case_ :Union[str, Any] = val[
-dim:, :
]
else:
snake_case_ :Dict = val[
:dim
]
snake_case_ :Optional[int] = val[
dim : dim * 2
]
snake_case_ :Optional[int] = val[
-dim:
]
else:
if "weight" in key:
snake_case_ :Optional[Any] = val[
:dim, :
]
snake_case_ :List[str] = val[
dim : dim * 2, :
]
snake_case_ :Dict = val[
-dim:, :
]
else:
snake_case_ :Union[str, Any] = val[:dim]
snake_case_ :Union[str, Any] = val[
dim : dim * 2
]
snake_case_ :Union[str, Any] = val[-dim:]
elif key.startswith("""mit""" ):
snake_case_ :Tuple = key_split[2]
snake_case_ :Union[str, Any] = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case_ :Optional[int] = val[:dim, :]
snake_case_ :Optional[int] = val[dim : dim * 2, :]
snake_case_ :str = val[-dim:, :]
else:
snake_case_ :str = val[:dim]
snake_case_ :Any = val[dim : dim * 2]
snake_case_ :int = val[-dim:]
else:
snake_case_ :Tuple = key_split[2]
snake_case_ :Any = config.text_config.hidden_size
if "weight" in key:
snake_case_ :Dict = val[:dim, :]
snake_case_ :Dict = val[
dim : dim * 2, :
]
snake_case_ :List[str] = val[-dim:, :]
else:
snake_case_ :Any = val[:dim]
snake_case_ :Tuple = val[
dim : dim * 2
]
snake_case_ :List[str] = val[-dim:]
else:
snake_case_ :Optional[int] = rename_key(_lowercase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
snake_case_ :Optional[Any] = val.T
snake_case_ :Tuple = val
return orig_state_dict
def prepare_video( num_frames ):
    '''simple docstring'''
    if num_frames == 8:
        filename = """eating_spaghetti_8_frames.npy"""
    elif num_frames == 16:
        filename = """eating_spaghetti.npy"""
    elif num_frames == 32:
        filename = """eating_spaghetti_32_frames.npy"""
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""", filename=filename, repo_type="""dataset""", )
    video = np.load(file )
    return list(video )
def convert_xclip_checkpoint( model_name, pytorch_dump_folder_path=None, push_to_hub=False ):
'''simple docstring'''
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name, num_frames )
    model = XCLIPModel(config )
    model.eval()
if "drive" in checkpoint_url:
snake_case_ :List[str] = """pytorch_model.bin"""
gdown.cached_download(_lowercase, _lowercase, quiet=_lowercase )
snake_case_ :List[Any] = torch.load(_lowercase, map_location="""cpu""" )["""model"""]
else:
snake_case_ :Tuple = torch.hub.load_state_dict_from_url(_lowercase )["""model"""]
snake_case_ :Union[str, Any] = convert_state_dict(_lowercase, _lowercase )
snake_case_ :str = XCLIPModel(_lowercase )
snake_case_, snake_case_ :Optional[int] = model.load_state_dict(_lowercase, strict=_lowercase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
    image_size = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
    image_processor = VideoMAEImageProcessor(size=image_size )
    slow_tokenizer = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer )
    video = prepare_video(num_frames )
    inputs = processor(
        text=["""playing sports""", """eating spaghetti""", """go shopping"""], videos=video, return_tensors="""pt""", padding=True )
print("""Shape of pixel values:""", inputs.pixel_values.shape )
    with torch.no_grad():
        outputs = model(**inputs )
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print("""Probs:""", probs )
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case_ :Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case_ :str = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] )
elif model_name == "xclip-base-patch16":
snake_case_ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case_ :Any = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] )
elif model_name == "xclip-large-patch14":
snake_case_ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case_ :Tuple = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case_ :Union[str, Any] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case_ :Dict = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case_ :Union[str, Any] = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case_ :str = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case_ :str = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case_ :int = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case_ :Optional[int] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case_ :Any = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case_ :Tuple = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case_ :Union[str, Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(_lowercase, _lowercase, atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(_lowercase, organization="""nielsr""" )
processor.push_to_hub(_lowercase, organization="""nielsr""" )
slow_tokenizer.push_to_hub(_lowercase, organization="""nielsr""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__a = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 66 | 0 |
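A small sketch of the in_proj splitting done in `convert_state_dict` above: a fused attention projection of shape (3 * dim, dim) is sliced into equal query, key, and value blocks. The dimensions here are illustrative, not taken from any real checkpoint:

import torch

dim = 4
# Fused qkv projection, as stored in the original checkpoints.
in_proj_weight = torch.randn(3 * dim, dim)
q_w = in_proj_weight[:dim, :]
k_w = in_proj_weight[dim : dim * 2, :]
v_w = in_proj_weight[-dim:, :]
assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)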
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
'''simple docstring'''
    config = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
UpperCamelCase__ = 1_024
UpperCamelCase__ = 4_096
UpperCamelCase__ = 24
UpperCamelCase__ = 16
UpperCamelCase__ = [5, 11, 17, 23]
UpperCamelCase__ = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
UpperCamelCase__ = 768
UpperCamelCase__ = [1, 1, 1, 0.5]
UpperCamelCase__ = [256, 512, 768, 768]
UpperCamelCase__ = 150
UpperCamelCase__ = 16
        expected_shape = (1, 384, 384)
UpperCamelCase__ = False
UpperCamelCase__ = """project"""
if "ade" in checkpoint_url:
UpperCamelCase__ = True
UpperCamelCase__ = 768
UpperCamelCase__ = [1, 1, 1, 0.5]
UpperCamelCase__ = 150
UpperCamelCase__ = 16
UpperCamelCase__ = """huggingface/label-files"""
UpperCamelCase__ = """ade20k-id2label.json"""
UpperCamelCase__ = json.load(open(cached_download(hf_hub_url(_lowercase , _lowercase , repo_type="""dataset""" ) ) , """r""" ) )
UpperCamelCase__ = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
return config, expected_shape
def remove_ignore_keys_( state_dict ):
'''simple docstring'''
UpperCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def rename_key( name ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
UpperCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
UpperCamelCase__ = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
UpperCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
UpperCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
UpperCamelCase__ = name.replace("""proj""" , """projection""" )
if "blocks" in name:
UpperCamelCase__ = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
UpperCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
UpperCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
UpperCamelCase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
UpperCamelCase__ = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
UpperCamelCase__ = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
UpperCamelCase__ = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
UpperCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
UpperCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
UpperCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
UpperCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
UpperCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCamelCase__ = name.replace(f"refinenet{layer_idx}" , f"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
UpperCamelCase__ = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
UpperCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
UpperCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
UpperCamelCase__ = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
UpperCamelCase__ = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
UpperCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
UpperCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
UpperCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
UpperCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
UpperCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
UpperCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
UpperCamelCase__ = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
UpperCamelCase__ = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
UpperCamelCase__ = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
UpperCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
UpperCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
UpperCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
UpperCamelCase__ = name.replace("""..""" , """.""" )
if "stem.conv" in name:
UpperCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
UpperCamelCase__ = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
UpperCamelCase__ = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
UpperCamelCase__ = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
UpperCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
UpperCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
UpperCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def read_in_q_k_v( state_dict , config ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
UpperCamelCase__ = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ = in_proj_weight[: config.hidden_size, :]
UpperCamelCase__ = in_proj_bias[: config.hidden_size]
UpperCamelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ = in_proj_bias[-config.hidden_size :]
def prepare_img( ):
'''simple docstring'''
UpperCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCamelCase__ = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
'''simple docstring'''
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
# read in qkv matrices
    read_in_q_k_v(state_dict , config )
# load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if """ade""" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
# Check outputs on an image
UpperCamelCase__ = 480 if """ade""" in checkpoint_url else 384
UpperCamelCase__ = DPTImageProcessor(size=_lowercase )
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(_lowercase , return_tensors="""pt""" )
# forward pass
UpperCamelCase__ = model(**_lowercase ).logits if """ade""" in checkpoint_url else model(**_lowercase ).predicted_depth
if show_prediction:
UpperCamelCase__ = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=_lowercase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowercase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
lowerCamelCase_ = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 244 |
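Both conversion scripts above rely on the same rename-and-reload pattern for migrating a checkpoint: pop each key, map it to the new naming scheme, and reinsert the tensor. A minimal, self-contained sketch (the key names are invented for illustration):

import torch

def migrate(state_dict, rename):
    # Build a new dict whose keys follow the target naming scheme.
    out = {}
    for key in list(state_dict.keys()):
        out[rename(key)] = state_dict.pop(key)
    return out

old = {"blocks.0.mlp.fc1.weight": torch.zeros(2, 2)}
new = migrate(old, lambda k: k.replace("blocks", "layer").replace("mlp.fc1", "intermediate.dense"))
assert "layer.0.intermediate.dense.weight" in new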
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxBertModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("""bert-base-cased""" )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
| 66 | 0 |
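A hedged usage sketch mirroring the slow test above: loading a pretrained Flax BERT and running a dummy forward pass. This assumes network access and the flax extra are available; the expected hidden size (768) is that of bert-base-cased:

import numpy as np
from transformers import FlaxBertModel

model = FlaxBertModel.from_pretrained("bert-base-cased")
# A single dummy token id; Flax models accept numpy inputs directly.
outputs = model(np.ones((1, 1), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 1, 768)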
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time( t ):
    """simple docstring"""
    t = int(t )
    h , m , s = t // 36_00, (t // 60) % 60, t % 60
    return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def html_progress_bar( value , total , prefix , label , width=3_00 ):
"""simple docstring"""
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def text_to_html_table( items ):
    """simple docstring"""
    html_code = """<table border=\"1\" class=\"dataframe\">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
a =f'''{elt:.6f}''' if isinstance(_lowercase , _lowercase ) else str(_lowercase )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class NotebookProgressBar :
"""simple docstring"""
    warmup = 5
    update_every = 0.2
    def __init__( self , total , prefix = None , leave = True , parent = None , width = 300 , ):
        self.total = total
        self.prefix = """""" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update( self , value , force_update = False , comment = None ):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value )
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value )
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item ) , 1 )
    def update_bar( self , value , comment=None ):
        spaced_value = """ """ * (len(str(self.total ) ) - len(str(value ) )) + str(value )
        if self.elapsed_time is None:
            self.label = f'''[{spaced_value}/{self.total} : < :'''
        elif self.predicted_remaining is None:
            self.label = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
        else:
            self.label = (
                f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
                f''' {format_time(self.predicted_remaining )}'''
            )
            self.label += f''', {1/self.average_time_per_item:.2f} it/s'''
        self.label += "]" if self.comment is None or len(self.comment ) == 0 else f''', {self.comment}]'''
self.display()
    def display( self ):
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def close( self ):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class NotebookTrainingTracker( NotebookProgressBar ):
"""simple docstring"""
    def __init__( self , num_steps , column_names=None ):
        super().__init__(num_steps )
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display( self ):
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table )
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def write_line( self , values ):
        if self.inner_table is None:
            self.inner_table = [list(values.keys() ), list(values.values() )]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table ) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key )
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns] )
    def add_child( self , total , prefix=None , width=300 ):
        self.child_bar = NotebookProgressBar(total , prefix=prefix , parent=self , width=width )
        return self.child_bar
    def remove_child( self ):
        self.child_bar = None
        self.display()
class NotebookProgressCallback( TrainerCallback ):
"""simple docstring"""
    def __init__( self ):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin( self , args , state , control , **kwargs ):
        self.first_column = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step"""
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["""Training Loss"""]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('''Validation Loss''' )
        self.training_tracker = NotebookTrainingTracker(state.max_steps , column_names )
    def on_step_end( self , args , state , control , **kwargs ):
        epoch = int(state.epoch ) if int(state.epoch ) == state.epoch else f'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1 , comment=f'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
        self.last_log = False
    def on_prediction_step( self , args , state , control , eval_dataloader=None , **kwargs ):
        if not has_length(eval_dataloader ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader ) )
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )
    def on_predict( self , args , state , control , **kwargs ):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log( self , args , state , control , logs=None , **kwargs ):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"""Training Loss""": logs["""loss"""]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["""Step"""] = state.global_step
            self.training_tracker.write_line(values )
    def on_evaluate( self , args , state , control , metrics=None , **kwargs ):
        if self.training_tracker is not None:
            values = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    values["""Training Loss"""] = log["""loss"""]
                    break
            if self.first_column == "Epoch":
                values["""Epoch"""] = int(state.epoch )
            else:
                values["""Step"""] = state.global_step
            metric_key_prefix = """eval"""
            for k in metrics:
                if k.endswith('''_loss''' ):
                    metric_key_prefix = re.sub(r'''\_loss$''' , '''''' , k )
            _ = metrics.pop('''total_flos''' , None )
            _ = metrics.pop('''epoch''' , None )
            _ = metrics.pop(f'''{metric_key_prefix}_runtime''' , None )
            _ = metrics.pop(f'''{metric_key_prefix}_samples_per_second''' , None )
            _ = metrics.pop(f'''{metric_key_prefix}_steps_per_second''' , None )
            _ = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''' , None )
            for k, v in metrics.items():
                if k == f'''{metric_key_prefix}_loss''':
                    values["""Validation Loss"""] = v
                else:
                    splits = k.split('''_''' )
                    name = """ """.join([part.capitalize() for part in splits[1:]] )
                    values[name] = v
            self.training_tracker.write_line(values )
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end( self , args , state , control , **kwargs ):
        self.training_tracker.update(
            state.global_step , comment=f'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=True )
        self.training_tracker = None
| 81 |
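A minimal sketch of the hook shape the notebook callback above implements; this hypothetical callback only logs the global step every 100 steps and makes no display updates:

from transformers import TrainerCallback

class PrintStepCallback(TrainerCallback):
    def on_step_end(self, args, state, control, **kwargs):
        # state carries the trainer's progress counters.
        if state.global_step % 100 == 0:
            print(f"step {state.global_step}/{state.max_steps}")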
"""simple docstring"""
import math
class SelfOrganizingMap :
'''simple docstring'''
    def get_winner( self , weights: list[list[float]] , sample: list[int] ) -> int:
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample ) ):
            d0 += math.pow((sample[i] - weights[0][i]) , 2 )
            d1 += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if d0 > d1 else 1
    def update( self , weights: list[list[int | float]] , sample: list[int] , j: int , alpha: float ) -> list[list[int | float]]:
        for i in range(len(sample ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main( ):
    '''simple docstring'''
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(f"""Clusters that the test sample belongs to : {winner}""" )
    print(f"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
| 66 | 0 |
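A worked example of the update rule used above, w <- w + alpha * (x - w): with alpha = 0.5 the winning weight vector moves halfway toward the training sample. The numbers below are chosen for illustration:

weight = [0.2, 0.6]
sample = [1.0, 0.0]
alpha = 0.5
# Each component moves a fraction alpha of the way toward the sample.
weight = [w + alpha * (s - w) for w, s in zip(weight, sample)]
print(weight)  # [0.6, 0.3]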
'''simple docstring'''
def logical_left_shift( number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift( number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift( number: int , shift_amount: int ) -> str:
    if number >= 0: # Get binary representation of positive number
        binary_number = """0""" + str(bin(number ) ).strip("""-""" )[2:]
    else: # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] ) # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            """1""" + """0""" * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276 |
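A worked cross-check of the shifts above: an arithmetic right shift sign-extends, so -8 shifted right by 2 yields -2, while a logical left shift of 8 by 3 appends three zero bits. Python's built-in operators already behave this way on ints, which makes them a handy reference:

print(-8 >> 2)      # -2, arithmetic shift sign-extends
print(8 >> 2)       # 2
print(bin(8 << 3))  # 0b1000000, the logical left shift of 8 by 3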
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.0_2 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = SwinvaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = SwinvaForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = SwinvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SwinvaConfig , embed_dim=37 )
    def test_config( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> str:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def lowerCAmelCase_ ( self: int ) -> Dict:
pass
def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]:
snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :Optional[int] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ :List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def lowerCAmelCase_ ( self: Dict ) -> Optional[int]:
snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :Optional[int] = model_class(snake_case )
snake_case_ :List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ :int = [*signature.parameters.keys()]
snake_case_ :List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]:
snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :List[str] = True
for model_class in self.all_model_classes:
snake_case_ :List[Any] = True
snake_case_ :Any = False
snake_case_ :Optional[int] = True
snake_case_ :Tuple = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Any = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :str = outputs.attentions
snake_case_ :Dict = len(self.model_tester.depths )
self.assertEqual(len(snake_case ) , snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ :Union[str, Any] = True
snake_case_ :Tuple = config.window_size**2
snake_case_ :Any = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :int = outputs.attentions
self.assertEqual(len(snake_case ) , snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
snake_case_ :Any = len(snake_case )
# Check attention is always last and order is fine
snake_case_ :int = True
snake_case_ :Dict = True
snake_case_ :Optional[int] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Dict = model(**self._prepare_for_class(snake_case , snake_case ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
snake_case_ :Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
snake_case_ :int = 2
self.assertEqual(out_len + added_hidden_states , len(snake_case ) )
snake_case_ :str = outputs.attentions
self.assertEqual(len(snake_case ) , snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
snake_case_ :Dict = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Optional[int] = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :str = outputs.hidden_states
snake_case_ :List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case ) , snake_case )
# Swinv2 has a different seq_length
snake_case_ :List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ :Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
snake_case_ :str = outputs.reshaped_hidden_states
self.assertEqual(len(snake_case ) , snake_case )
snake_case_, snake_case_, snake_case_, snake_case_ :Any = reshaped_hidden_states[0].shape
snake_case_ :int = (
reshaped_hidden_states[0].view(snake_case , snake_case , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase_ ( self: Any ) -> Any:
snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case_ :Union[str, Any] = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :List[str] = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
def lowerCAmelCase_ ( self: Tuple ) -> Any:
snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Optional[int] = 3
snake_case_ :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case_ :str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ :Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case_ :int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case_ :str = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :Tuple = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
def lowerCAmelCase_ ( self: Any ) -> Tuple:
snake_case_ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case )
def lowerCAmelCase_ ( self: Optional[int] ) -> Dict:
snake_case_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@slow
def lowerCAmelCase_ ( self: List[Any] ) -> Dict:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ :List[str] = SwinvaModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]:
snake_case_, snake_case_ :str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Optional[int] = _config_zero_init(snake_case )
for model_class in self.all_model_classes:
snake_case_ :Tuple = model_class(config=snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self: List[str] ) -> List[str]:
snake_case_ :Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
snake_case )
snake_case_ :str = self.default_image_processor
snake_case_ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case_ :str = image_processor(images=snake_case , return_tensors="""pt""" ).to(snake_case )
# forward pass
with torch.no_grad():
snake_case_ :Tuple = model(**snake_case )
# verify the logits
snake_case_ :Dict = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case )
snake_case_ :int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
| 66 | 0 |
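The attention assertions above reduce to simple window arithmetic: every Swinv2 attention map is square in window_size**2. A minimal sketch of that arithmetic, assuming illustrative values (256x256 input, patch size 4, window size 8, 3 heads) rather than values read from any real config object:

# Sketch of the Swin window-attention shape arithmetic checked above.
# All concrete numbers here are assumptions for illustration.
image_size, patch_size, window_size, num_heads = 256, 4, 8, 3
tokens_per_side = image_size // patch_size           # 64 patch tokens per side
windows_per_side = tokens_per_side // window_size    # 8 windows per side
num_windows = windows_per_side ** 2                  # 64 windows in total
window_len = window_size ** 2                        # 64 tokens per window
# shape[-3:] of each tensor in outputs.attentions is compared against
# [num_heads, window_size**2, window_size**2] in the test above.
attention_shape = (num_windows, num_heads, window_len, window_len)
print(attention_shape)  # (64, 3, 64, 64)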
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( _lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = (IPNDMScheduler,)
UpperCamelCase_ : Any = (("""num_inference_steps""", 50),)
def _lowerCAmelCase ( self : Union[str, Any] , **lowerCAmelCase__ : str ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Tuple = {"""num_train_timesteps""": 1_0_0_0}
config.update(**lowerCAmelCase__ )
return config
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : int=0 , **lowerCAmelCase__ : str ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_UpperCAmelCase : str = kwargs.pop("num_inference_steps" , lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = self.dummy_sample
_UpperCAmelCase : Tuple = 0.1 * sample
_UpperCAmelCase : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : int = self.get_scheduler_config(**lowerCAmelCase__ )
_UpperCAmelCase : str = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals
_UpperCAmelCase : str = dummy_past_residuals[:]
if time_step is None:
_UpperCAmelCase : Optional[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = scheduler_class.from_pretrained(lowerCAmelCase__ )
new_scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals
_UpperCAmelCase : str = dummy_past_residuals[:]
_UpperCAmelCase : int = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
_UpperCAmelCase : Optional[int] = new_scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_UpperCAmelCase : Optional[int] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
_UpperCAmelCase : Tuple = new_scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : str=0 , **lowerCAmelCase__ : Dict ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : List[str] = dict(self.forward_default_kwargs )
_UpperCAmelCase : Optional[int] = kwargs.pop("num_inference_steps" , lowerCAmelCase__ )
_UpperCAmelCase : Dict = self.dummy_sample
_UpperCAmelCase : Optional[Any] = 0.1 * sample
_UpperCAmelCase : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : Tuple = self.get_scheduler_config()
_UpperCAmelCase : List[str] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase : Any = dummy_past_residuals[:]
if time_step is None:
_UpperCAmelCase : Dict = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__ )
_UpperCAmelCase : int = scheduler_class.from_pretrained(lowerCAmelCase__ )
                new_scheduler.set_timesteps(lowerCAmelCase__ )
                # copy over dummy past residuals (must be done after setting timesteps)
_UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
_UpperCAmelCase : List[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
_UpperCAmelCase : Union[str, Any] = new_scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_UpperCAmelCase : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
_UpperCAmelCase : List[str] = new_scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self : Optional[int] , **lowerCAmelCase__ : Tuple ) -> str:
"""simple docstring"""
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : List[Any] = self.get_scheduler_config(**lowerCAmelCase__ )
_UpperCAmelCase : int = scheduler_class(**lowerCAmelCase__ )
_UpperCAmelCase : str = 1_0
_UpperCAmelCase : Optional[Any] = self.dummy_model()
_UpperCAmelCase : int = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : Tuple = model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Dict = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : Optional[int] = model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Any = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
return sample
def _lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : str = dict(self.forward_default_kwargs )
_UpperCAmelCase : int = kwargs.pop("num_inference_steps" , lowerCAmelCase__ )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : List[str] = self.get_scheduler_config()
_UpperCAmelCase : Any = scheduler_class(**lowerCAmelCase__ )
_UpperCAmelCase : List[str] = self.dummy_sample
_UpperCAmelCase : Dict = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCAmelCase__ , "set_timesteps" ):
scheduler.set_timesteps(lowerCAmelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCAmelCase__ , "set_timesteps" ):
_UpperCAmelCase : str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_UpperCAmelCase : Optional[int] = dummy_past_residuals[:]
_UpperCAmelCase : Optional[int] = scheduler.timesteps[5]
_UpperCAmelCase : str = scheduler.timesteps[6]
_UpperCAmelCase : int = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
_UpperCAmelCase : List[str] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_UpperCAmelCase : str = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
_UpperCAmelCase : Optional[int] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ , time_step=lowerCAmelCase__ )
def _lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=lowerCAmelCase__ , time_step=lowerCAmelCase__ )
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.full_loop()
_UpperCAmelCase : str = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_mean.item() - 2_5_4_0_5_2_9 ) < 1_0 | 145 |
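The full-loop helper above follows the standard diffusers sampling pattern. A standalone sketch of that loop, assuming a stand-in callable in place of a trained denoiser:

import torch
from diffusers import IPNDMScheduler

# Minimal sketch of the set_timesteps/step loop exercised by the tests above.
scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)

def fake_model(sample, t):
    # stand-in for a real denoising network (assumption for illustration)
    return 0.1 * sample

for t in scheduler.timesteps:
    residual = fake_model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample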
"""simple docstring"""
import re
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Optional[int] = re.compile(
r"""^(?:0|94|\+94|0{2}94)""" r"""7(0|1|2|4|5|6|7|8)""" r"""(-| |)""" r"""\d{7}$""" )
return bool(re.search(_lowercase, _lowercase ) )
if __name__ == "__main__":
__a = "0094702343221"
print(is_sri_lankan_phone_number(phone))
| 66 | 0 |
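The validator above accepts 07..., 94..., +94..., and 0094... prefixes followed by a valid operator digit and seven more digits. A quick usage sketch against a few assumed sample numbers:

import re

# Same pattern as above, exercised on assumed sample inputs.
pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
for number in ["0094702343221", "+94717343221", "075 3430311", "0799999999"]:
    print(number, bool(pattern.search(number)))  # last one is False: 9 is not a valid operator digit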
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
__A : Dict = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
__A : List[Any] = {
"allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
_UpperCAmelCase = bs[:]
_UpperCAmelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowercase )
cs.append(2**8 + n )
n += 1
_UpperCAmelCase = [chr(_lowercase ) for n in cs]
return dict(zip(_lowercase , _lowercase ) )
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = set()
_UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCAmelCase = char
return pairs
class _a ( _lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any]="replace" , __UpperCamelCase : Optional[Any]="<s>" , __UpperCamelCase : Any="</s>" , __UpperCamelCase : int="</s>" , __UpperCamelCase : Optional[int]="<s>" , __UpperCamelCase : int="<unk>" , __UpperCamelCase : Any="<pad>" , __UpperCamelCase : str="<mask>" , __UpperCamelCase : Optional[int]=False , **__UpperCamelCase : str , )->List[Any]:
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
with open(__UpperCamelCase , encoding='''utf-8''' ) as vocab_handle:
_UpperCAmelCase = json.load(__UpperCamelCase )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
_UpperCAmelCase = errors # how to handle errors in decoding
_UpperCAmelCase = bytes_to_unicode()
_UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCamelCase , encoding='''utf-8''' ) as merges_handle:
_UpperCAmelCase = merges_handle.read().split('''\n''' )[1:-1]
_UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
_UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCAmelCase = {}
_UpperCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_UpperCAmelCase = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowercase__ ( self : str )->List[str]:
return len(self.encoder )
def lowercase__ ( self : Optional[int] )->int:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : Tuple , __UpperCamelCase : Optional[Any] )->Union[str, Any]:
if token in self.cache:
return self.cache[token]
_UpperCAmelCase = tuple(__UpperCamelCase )
_UpperCAmelCase = get_pairs(__UpperCamelCase )
if not pairs:
return token
while True:
_UpperCAmelCase = min(__UpperCamelCase , key=lambda __UpperCamelCase : self.bpe_ranks.get(__UpperCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase = bigram
_UpperCAmelCase = []
_UpperCAmelCase = 0
while i < len(__UpperCamelCase ):
try:
_UpperCAmelCase = word.index(__UpperCamelCase , __UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCAmelCase = j
if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCAmelCase = tuple(__UpperCamelCase )
_UpperCAmelCase = new_word
if len(__UpperCamelCase ) == 1:
break
else:
_UpperCAmelCase = get_pairs(__UpperCamelCase )
_UpperCAmelCase = """ """.join(__UpperCamelCase )
_UpperCAmelCase = word
return word
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] )->str:
_UpperCAmelCase = []
for token in re.findall(self.pat , __UpperCamelCase ):
_UpperCAmelCase = """""".join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCamelCase ).split(''' ''' ) )
return bpe_tokens
def lowercase__ ( self : List[Any] , __UpperCamelCase : Tuple )->Optional[int]:
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : int , __UpperCamelCase : List[str] )->Tuple:
return self.decoder.get(__UpperCamelCase )
def lowercase__ ( self : Dict , __UpperCamelCase : Dict )->str:
_UpperCAmelCase = """""".join(__UpperCamelCase )
_UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def lowercase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None )->Tuple[str]:
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + '''\n''' )
_UpperCAmelCase = 0
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
_UpperCAmelCase = token_index
writer.write(''' '''.join(__UpperCamelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None )->List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
_UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False )->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None )->List[int]:
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int]=False , **__UpperCamelCase : List[Any] )->Dict:
_UpperCAmelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__UpperCamelCase ) > 0 and not text[0].isspace()):
_UpperCAmelCase = """ """ + text
return (text, kwargs)
def lowercase__ ( self : List[str] , __UpperCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[bool] = None , )->dict:
_UpperCAmelCase = super()._pad(
encoded_inputs=__UpperCamelCase , max_length=__UpperCamelCase , padding_strategy=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
# Load from model defaults
if return_attention_mask is None:
_UpperCAmelCase = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_UpperCAmelCase = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
_UpperCAmelCase = len(encoded_inputs['''global_attention_mask'''] ) != len(__UpperCamelCase )
if needs_to_be_padded:
_UpperCAmelCase = len(__UpperCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_UpperCAmelCase = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
_UpperCAmelCase = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 260 |
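The _pad override above extends global_attention_mask with -1 rather than 0, because 0 already means "local attention" for LED. A toy sketch of that rule on plain lists, with assumed values (1 = global token, 0 = local token):

# Toy illustration of the -1 padding rule implemented in _pad above.
global_attention_mask = [1, 0, 0, 0]
target_length = 8
difference = target_length - len(global_attention_mask)
padded_right = global_attention_mask + [-1] * difference  # padding_side == "right"
padded_left = [-1] * difference + global_attention_mask   # padding_side == "left"
print(padded_right)  # [1, 0, 0, 0, -1, -1, -1, -1]
print(padded_left)   # [-1, -1, -1, -1, 1, 0, 0, 0]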
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__a = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A_ ( _lowercase ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
if args.student_type == "roberta":
snake_case_ :Tuple = False
elif args.student_type == "gpt2":
snake_case_ :Union[str, Any] = False
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
if args.student_type == "roberta":
snake_case_ :List[str] = False
def A_ ( ):
'''simple docstring'''
snake_case_ :Union[str, Any] = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""", action="""store_true""", help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""", type=_lowercase, required=_lowercase, help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""", type=_lowercase, required=_lowercase, help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""", )
parser.add_argument(
"""--student_type""", type=_lowercase, choices=["""distilbert""", """roberta""", """gpt2"""], required=_lowercase, help="""The student type (DistilBERT, RoBERTa).""", )
parser.add_argument("""--student_config""", type=_lowercase, required=_lowercase, help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""", default=_lowercase, type=_lowercase, help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""", choices=["""bert""", """roberta""", """gpt2"""], required=_lowercase, help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""", type=_lowercase, required=_lowercase, help="""The teacher model.""" )
parser.add_argument("""--temperature""", default=2.0, type=_lowercase, help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""", default=0.5, type=_lowercase, help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""", default=0.0, type=_lowercase, help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""", )
parser.add_argument("""--alpha_clm""", default=0.5, type=_lowercase, help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""", default=0.0, type=_lowercase, help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""", default=0.0, type=_lowercase, help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""", action="""store_true""", help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""", default=0.15, type=_lowercase, help="""Proportion of tokens for which we need to make a prediction.""", )
parser.add_argument("""--word_mask""", default=0.8, type=_lowercase, help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""", default=0.1, type=_lowercase, help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""", default=0.1, type=_lowercase, help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""", default=0.7, type=_lowercase, help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""", )
parser.add_argument("""--token_counts""", type=_lowercase, help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""", action="""store_true""", help="""If true, compute the distillation loss only the [MLM] prediction distribution.""", )
parser.add_argument(
"""--freeze_pos_embs""", action="""store_true""", help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""", )
parser.add_argument(
"""--freeze_token_type_embds""", action="""store_true""", help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""", )
parser.add_argument("""--n_epoch""", type=_lowercase, default=3, help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""", type=_lowercase, default=5, help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""", action="""store_false""", help="""If true, group sequences that have similar length into the same batch. Default is true.""", )
parser.add_argument(
"""--gradient_accumulation_steps""", type=_lowercase, default=50, help="""Gradient accumulation for larger training batches.""", )
parser.add_argument("""--warmup_prop""", default=0.05, type=_lowercase, help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""", default=0.0, type=_lowercase, help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""", default=5e-4, type=_lowercase, help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""", default=1e-6, type=_lowercase, help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""", default=5.0, type=_lowercase, help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""", default=0.02, type=_lowercase, help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""", )
parser.add_argument(
"""--fp16_opt_level""", type=_lowercase, default="""O1""", help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
), )
parser.add_argument("""--n_gpu""", type=_lowercase, default=1, help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""", type=_lowercase, default=-1, help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""", type=_lowercase, default=56, help="""Random seed""" )
parser.add_argument("""--log_interval""", type=_lowercase, default=500, help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""", type=_lowercase, default=4000, help="""Checkpoint interval.""" )
snake_case_ :Tuple = parser.parse_args()
sanity_checks(_lowercase )
# ARGS #
init_gpu_params(_lowercase )
set_seed(_lowercase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path, """parameters.json""" ), """w""" ) as f:
json.dump(vars(_lowercase ), _lowercase, indent=4 )
git_log(args.dump_path )
snake_case_, snake_case_, snake_case_ :Any = MODEL_CLASSES[args.student_type]
snake_case_, snake_case_, snake_case_ :int = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
snake_case_ :Any = teacher_tokenizer_class.from_pretrained(args.teacher_name )
snake_case_ :Optional[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
snake_case_ :Union[str, Any] = tokenizer.all_special_tokens.index(_lowercase )
snake_case_ :Union[str, Any] = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
snake_case_ :str = special_tok_ids
snake_case_ :Any = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file, """rb""" ) as fp:
snake_case_ :str = pickle.load(_lowercase )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts, """rb""" ) as fp:
snake_case_ :Optional[Any] = pickle.load(_lowercase )
snake_case_ :Tuple = np.maximum(_lowercase, 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
snake_case_ :Optional[int] = 0.0 # do not predict special tokens
snake_case_ :int = torch.from_numpy(_lowercase )
else:
snake_case_ :List[str] = None
snake_case_ :Optional[int] = LmSeqsDataset(params=_lowercase, data=_lowercase )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
snake_case_ :List[Any] = student_config_class.from_pretrained(args.student_config )
snake_case_ :Union[str, Any] = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
snake_case_ :List[str] = student_model_class.from_pretrained(args.student_pretrained_weights, config=_lowercase )
else:
snake_case_ :Optional[int] = student_model_class(_lowercase )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info("""Student loaded.""" )
# TEACHER #
snake_case_ :Dict = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=_lowercase )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(_lowercase, _lowercase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(_lowercase, _lowercase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
snake_case_ :Optional[int] = Distiller(
params=_lowercase, dataset=_lowercase, token_probs=_lowercase, student=_lowercase, teacher=_lowercase )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
| 66 | 0 |
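The script above sums up to five weighted losses (--alpha_ce, --alpha_mlm/--alpha_clm, --alpha_mse, --alpha_cos). A sketch of the temperature-scaled CE term at the heart of the objective; the shapes and values are assumptions, and this is the generic distillation formula, not the Distiller class itself:

import torch
import torch.nn.functional as F

# Generic temperature-scaled distillation CE in the spirit of --alpha_ce and
# --temperature above. Shapes (batch=2, seq=4, vocab=10) are assumptions.
temperature = 2.0
student_logits = torch.randn(2, 4, 10)
teacher_logits = torch.randn(2, 4, 10)
loss_ce = F.kl_div(
    F.log_softmax(student_logits / temperature, dim=-1),
    F.softmax(teacher_logits / temperature, dim=-1),
    reduction="batchmean",
) * (temperature ** 2)  # classic T^2 rescaling keeps gradient magnitudes stable
total_loss = 0.5 * loss_ce  # the other alpha-weighted terms would be added here
print(total_loss.item())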
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Tuple ={
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] =["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str =[
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str =[
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Dict =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 262 |
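The _LazyModule registration above defers the heavy torch/TF imports until a symbol is actually requested. A minimal sketch of the same idea with module-level __getattr__ (PEP 562); the mapping is an assumed example, not transformers' real implementation:

import importlib

# Minimal lazy loader in the spirit of _LazyModule above; it only resolves a
# submodule the first time one of its exported names is accessed by importers.
_import_structure = {"tokenization_mobilebert": ["MobileBertTokenizer"]}
_symbol_to_module = {
    symbol: module for module, symbols in _import_structure.items() for symbol in symbols
}

def __getattr__(name):
    if name in _symbol_to_module:
        module = importlib.import_module("." + _symbol_to_module[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")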
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: Any ) -> str:
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case , )
assert hasattr(self , """env""" )
def lowerCAmelCase_ ( self: int , snake_case: Dict ) -> List[Any]:
# configuration for running training on smdistributed Model Parallel
snake_case_ :Tuple = {
"""enabled""": True,
"""processes_per_host""": 8,
}
snake_case_ :List[Any] = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
snake_case_ :Tuple = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
snake_case_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case , py_version="""py36""" , )
def lowerCAmelCase_ ( self: Any , snake_case: Tuple ) -> List[str]:
TrainingJobAnalytics(snake_case ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowerCAmelCase_ ( self: Dict , snake_case: Dict ) -> List[Any]:
# create estimator
snake_case_ :List[Any] = self.create_estimator(snake_case )
# run training
estimator.fit()
# result dataframe
snake_case_ :Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
snake_case_ :Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
snake_case_ :Dict = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from the SageMaker job; this includes starting, preprocessing, and stopping
snake_case_ :int = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case )
| 66 | 0 |
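The KPI assertions above boil down to filtering the TrainingJobAnalytics dataframe by metric_name. A sketch of that filtering on an assumed toy dataframe:

import pandas as pd

# Toy stand-in for TrainingJobAnalytics(job_name).dataframe(); values assumed.
result_metrics_df = pd.DataFrame(
    {
        "metric_name": ["eval_accuracy", "eval_loss", "eval_accuracy"],
        "value": [0.41, 1.05, 0.44],
    }
)
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
assert all(t >= 0.3 for t in eval_accuracy)  # mirrors the KPI checks above
assert all(t <= 1.2 for t in eval_loss)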
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : int = StableDiffusionPanoramaPipeline
_lowerCamelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS
_lowerCamelCase : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
def __A ( self : List[Any] ):
torch.manual_seed(0 )
A_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
A_ = DDIMScheduler()
torch.manual_seed(0 )
A_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ = CLIPTextModel(UpperCAmelCase )
A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __A ( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]=0 ):
A_ = torch.manual_seed(UpperCAmelCase )
A_ = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __A ( self : Union[str, Any] ):
A_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ = self.get_dummy_components()
A_ = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
A_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = self.get_dummy_inputs(UpperCAmelCase )
A_ = sd_pipe(**UpperCAmelCase ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __A ( self : Optional[int] ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self : Any ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def __A ( self : Dict ):
A_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ = self.get_dummy_components()
A_ = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
A_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = self.get_dummy_inputs(UpperCAmelCase )
A_ = """french fries"""
A_ = sd_pipe(**UpperCAmelCase , negative_prompt=UpperCAmelCase )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __A ( self : Tuple ):
A_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ = self.get_dummy_components()
A_ = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
A_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = self.get_dummy_inputs(UpperCAmelCase )
A_ = sd_pipe(**UpperCAmelCase , view_batch_size=2 )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __A ( self : Dict ):
A_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ = self.get_dummy_components()
A_ = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" )
A_ = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
A_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = self.get_dummy_inputs(UpperCAmelCase )
A_ = sd_pipe(**UpperCAmelCase ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __A ( self : Optional[Any] ):
A_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ = self.get_dummy_components()
A_ = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , skip_prk_steps=UpperCAmelCase )
A_ = StableDiffusionPanoramaPipeline(**UpperCAmelCase )
A_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = self.get_dummy_inputs(UpperCAmelCase )
A_ = sd_pipe(**UpperCAmelCase ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Dict ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Any , UpperCAmelCase : Union[str, Any]=0 ):
A_ = torch.manual_seed(UpperCAmelCase )
A_ = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __A ( self : Union[str, Any] ):
A_ = """stabilityai/stable-diffusion-2-base"""
A_ = DDIMScheduler.from_pretrained(UpperCAmelCase , subfolder="scheduler" )
A_ = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
A_ = self.get_inputs()
A_ = pipe(**UpperCAmelCase ).images
A_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
A_ = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def __A ( self : int ):
A_ = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=UpperCAmelCase )
A_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
A_ = self.get_inputs()
A_ = pipe(**UpperCAmelCase ).images
A_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
A_ = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __A ( self : List[Any] ):
A_ = 0
def callback_fn(UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor ) -> None:
A_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
A_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
A_ = latents[0, -3:, -3:, -1]
A_ = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
A_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
A_ = latents[0, -3:, -3:, -1]
A_ = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
A_ = False
A_ = """stabilityai/stable-diffusion-2-base"""
A_ = DDIMScheduler.from_pretrained(UpperCAmelCase , subfolder="scheduler" )
A_ = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase )
A_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
A_ = self.get_inputs()
pipe(**UpperCAmelCase , callback=UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __A ( self : Tuple ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A_ = """stabilityai/stable-diffusion-2-base"""
A_ = DDIMScheduler.from_pretrained(UpperCAmelCase , subfolder="scheduler" )
A_ = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase )
A_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A_ = self.get_inputs()
A_ = pipe(**UpperCAmelCase )
A_ = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9 | 312 |
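The slow tests above expect 512x2048 images and latents of shape (1, 4, 64, 256): the VAE downsamples each spatial side by 8, and the pipeline then denoises overlapping 64x64 latent views across the strip. A sketch of the arithmetic, where the stride of 8 is an assumption for illustration:

# Arithmetic behind the (1, 4, 64, 256) latent shape asserted above.
height, width, vae_scale = 512, 2048, 8
latent_h, latent_w = height // vae_scale, width // vae_scale
print(latent_h, latent_w)  # 64 256

window, stride = 64, 8  # stride is an assumed illustration value
num_views = (latent_w - window) // stride + 1
print(num_views)  # 25 horizontal views blended into one panorama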
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Dict , snake_case: Optional[Any] , snake_case: Tuple=13 , snake_case: Any=32 , snake_case: Union[str, Any]=2 , snake_case: Tuple=3 , snake_case: Union[str, Any]=16 , snake_case: Union[str, Any]=[1, 2, 1] , snake_case: Optional[Any]=[2, 2, 4] , snake_case: str=2 , snake_case: List[str]=2.0 , snake_case: Optional[int]=True , snake_case: Union[str, Any]=0.0 , snake_case: Optional[int]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[str]="gelu" , snake_case: Any=False , snake_case: Optional[Any]=True , snake_case: Optional[int]=0.0_2 , snake_case: Any=1E-5 , snake_case: Optional[int]=True , snake_case: int=None , snake_case: Any=True , snake_case: str=10 , snake_case: Optional[Any]=8 , snake_case: Union[str, Any]=["stage1", "stage2", "stage3"] , snake_case: Tuple=[1, 2, 3] , ) -> Dict:
snake_case_ :Dict = parent
snake_case_ :List[Any] = batch_size
snake_case_ :Dict = image_size
snake_case_ :Dict = patch_size
snake_case_ :Tuple = num_channels
snake_case_ :List[Any] = embed_dim
snake_case_ :List[str] = depths
snake_case_ :str = num_heads
snake_case_ :Tuple = window_size
snake_case_ :Tuple = mlp_ratio
snake_case_ :int = qkv_bias
snake_case_ :Tuple = hidden_dropout_prob
snake_case_ :Optional[Any] = attention_probs_dropout_prob
snake_case_ :Dict = drop_path_rate
snake_case_ :Any = hidden_act
snake_case_ :Any = use_absolute_embeddings
snake_case_ :int = patch_norm
snake_case_ :List[Any] = layer_norm_eps
snake_case_ :Tuple = initializer_range
snake_case_ :str = is_training
snake_case_ :int = scope
snake_case_ :Tuple = use_labels
snake_case_ :Tuple = type_sequence_label_size
snake_case_ :str = encoder_stride
snake_case_ :List[Any] = out_features
snake_case_ :str = out_indices
def lowerCAmelCase_ ( self: Tuple ) -> Dict:
snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ :str = None
if self.use_labels:
snake_case_ :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ :Union[str, Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self: int ) -> Optional[Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowerCAmelCase_ ( self: List[Any] , snake_case: str , snake_case: int , snake_case: List[str] ) -> Any:
snake_case_ :Dict = MaskFormerSwinModel(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Tuple = model(snake_case )
snake_case_ :Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case_ :Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase_ ( self: Optional[Any] , snake_case: int , snake_case: List[str] , snake_case: Tuple ) -> Union[str, Any]:
snake_case_ :Any = MaskFormerSwinBackbone(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Optional[Any] = model(snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(snake_case ):
snake_case_ :Optional[Any] = ["""stem"""]
snake_case_ :str = MaskFormerSwinBackbone(config=snake_case )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]:
snake_case_ :Optional[int] = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_ :str = config_and_inputs
snake_case_ :Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : Union[str, Any] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_A : str = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
_A : List[str] = False
_A : Any = False
_A : Dict = False
_A : List[Any] = False
_A : Optional[int] = False
def lowerCAmelCase_ ( self: Dict ) -> Any:
snake_case_ :str = MaskFormerSwinModelTester(self )
        snake_case_ :Optional[Any] = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]:
pass
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self: Any ) -> Tuple:
return
def lowerCAmelCase_ ( self: Any ) -> Any:
snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> int:
snake_case_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case )
@unittest.skip("""Swin does not use inputs_embeds""" )
def lowerCAmelCase_ ( self: str ) -> List[str]:
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
pass
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :str = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ :Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def lowerCAmelCase_ ( self: Tuple ) -> Dict:
snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :Optional[int] = model_class(snake_case )
snake_case_ :str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ :str = [*signature.parameters.keys()]
snake_case_ :str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]:
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def lowerCAmelCase_ ( self: Dict ) -> List[Any]:
pass
def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Any , snake_case: List[str] ) -> str:
snake_case_ :List[str] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :List[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :Any = outputs.hidden_states
snake_case_ :Optional[int] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case ) , snake_case )
# Swin has a different seq_length
snake_case_ :str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ :int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]:
snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case_ :Tuple = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :List[Any] = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple:
snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :List[Any] = 3
snake_case_ :List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case_ :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case_ :List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case_ :str = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :Any = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowerCAmelCase_ ( self: List[str] ) -> str:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowerCAmelCase_ ( self: str ) -> List[Any]:
pass
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
snake_case_, snake_case_ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(snake_case: str ):
snake_case_ :Optional[int] = 0
return t
def check_equivalence(snake_case: List[Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Tuple={} ):
with torch.no_grad():
snake_case_ :List[Any] = model(**snake_case , return_dict=snake_case , **snake_case )
snake_case_ :Any = model(**snake_case , return_dict=snake_case , **snake_case ).to_tuple()
def recursive_check(snake_case: List[Any] , snake_case: int ):
if isinstance(snake_case , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(snake_case , snake_case ):
recursive_check(snake_case , snake_case )
elif isinstance(snake_case , snake_case ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(snake_case , snake_case )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(snake_case ) , set_nan_tensor_to_zero(snake_case ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}. Dict has"""
f""" `nan`: {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}."""
) , )
recursive_check(snake_case , snake_case )
for model_class in self.all_model_classes:
snake_case_ :int = model_class(snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Any = self._prepare_for_class(snake_case , snake_case )
snake_case_ :List[Any] = self._prepare_for_class(snake_case , snake_case )
check_equivalence(snake_case , snake_case , snake_case )
snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
check_equivalence(snake_case , snake_case , snake_case )
snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case )
snake_case_ :Any = self._prepare_for_class(snake_case , snake_case )
check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} )
snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
snake_case_ :List[str] = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} )
@require_torch
class lowerCamelCase ( unittest.TestCase , BackboneTesterMixin ):
'''simple docstring'''
_A : int = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_A : Tuple = MaskFormerSwinConfig
def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]:
snake_case_ :Optional[Any] = MaskFormerSwinModelTester(self )
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Tuple = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
snake_case_ :List[str] = backbone_class(snake_case )
backbone.to(snake_case )
backbone.eval()
snake_case_ :List[Any] = backbone(**snake_case )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , snake_case )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
snake_case_ :Union[str, Any] = backbone(**snake_case , output_hidden_states=snake_case )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
snake_case_, snake_case_, snake_case_ :List[Any] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
snake_case_ :List[Any] = backbone(**snake_case , output_attentions=snake_case )
self.assertIsNotNone(outputs.attentions )
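
# Side note (a standalone sketch, not part of the test classes above): the shape
# arithmetic that create_and_check_model relies on, worked through with this
# tester's defaults. Each of the len(depths) - 1 patch-merging stages quarters the
# token count and doubles the channel width.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))  # 256 // 16
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))  # 16 * 4
assert (expected_seq_len, expected_dim) == (16, 64)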
| 66 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
lowerCAmelCase__ : Optional[int] = """https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"""
lowerCAmelCase__ : Optional[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert('RGB' )
return image
def create_rename_keys(config):
lowerCAmelCase__ : str = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowerCAmelCase__ : Dict = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
lowerCAmelCase__ : Tuple = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
lowerCAmelCase__ : Union[str, Any] = torch.cat((q_bias, torch.zeros_like(_lowercase , requires_grad=_lowercase ), v_bias) )
lowerCAmelCase__ : int = qkv_bias
def get_blipa_config(model_name):
lowerCAmelCase__ : List[str] = 364 if """coco""" in model_name else 224
lowerCAmelCase__ : Tuple = InstructBlipVisionConfig(image_size=_lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
lowerCAmelCase__ : Optional[Any] = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowerCAmelCase__ : List[Any] = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
lowerCAmelCase__ : Optional[int] = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=32_001 ).to_dict()
elif "vicuna-13b" in model_name:
lowerCAmelCase__ : Optional[Any] = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=32_001 ).to_dict()
else:
raise ValueError('Model name not supported' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
lowerCAmelCase__ : Optional[int] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict()
lowerCAmelCase__ : List[str] = InstructBlipConfig(vision_config=_lowercase , text_config=_lowercase , qformer_config=_lowercase )
return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
lowerCAmelCase__ : int = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' )
qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} )
if "t5" in model_name:
lowerCAmelCase__ : Optional[int] = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowerCAmelCase__ : str = LlamaTokenizerFast.from_pretrained(
'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' )
tokenizer.add_special_tokens({'pad_token': '[PAD]'} )
lowerCAmelCase__ : Union[str, Any] = get_blipa_config(_lowercase )
lowerCAmelCase__ : Union[str, Any] = InstructBlipForConditionalGeneration(_lowercase ).eval()
lowerCAmelCase__ : Union[str, Any] = {
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
lowerCAmelCase__ : List[Any] = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
lowerCAmelCase__ : Optional[Any] = """cuda:1""" if torch.cuda.is_available() else """cpu"""
lowerCAmelCase__ : Any = """cuda:2""" if torch.cuda.is_available() else """cpu"""
lowerCAmelCase__ : int = load_model_and_preprocess(
name=_lowercase , model_type=_lowercase , is_eval=_lowercase , device=_lowercase )
original_model.eval()
print('Done!' )
# update state dict keys
lowerCAmelCase__ : int = original_model.state_dict()
lowerCAmelCase__ : int = create_rename_keys(_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCAmelCase__ : Optional[Any] = state_dict.pop(_lowercase )
if key.startswith('Qformer.bert' ):
lowerCAmelCase__ : str = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
lowerCAmelCase__ : int = key.replace('self' , 'attention' )
if "llm_proj" in key:
lowerCAmelCase__ : Dict = key.replace('llm_proj' , 'language_projection' )
if "t5_proj" in key:
lowerCAmelCase__ : Dict = key.replace('t5_proj' , 'language_projection' )
if key.startswith('llm_model' ):
lowerCAmelCase__ : Union[str, Any] = key.replace('llm_model' , 'language_model' )
if key.startswith('t5' ):
lowerCAmelCase__ : List[str] = key.replace('t5' , 'language' )
lowerCAmelCase__ : List[str] = val
# read in qv biases
read_in_q_v_bias(_lowercase , _lowercase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(_lowercase , strict=_lowercase )
lowerCAmelCase__ : Optional[Any] = load_demo_image()
lowerCAmelCase__ : Tuple = """What is unusual about this image?"""
# create processor
lowerCAmelCase__ : Tuple = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_lowercase , image_std=_lowercase )
lowerCAmelCase__ : Tuple = InstructBlipProcessor(
image_processor=_lowercase , tokenizer=_lowercase , qformer_tokenizer=_lowercase , )
lowerCAmelCase__ : Tuple = processor(images=_lowercase , text=_lowercase , return_tensors='pt' ).to(_lowercase )
# make sure processor creates exact same pixel values
lowerCAmelCase__ : Optional[Any] = vis_processors["""eval"""](_lowercase ).unsqueeze(0 ).to(_lowercase )
lowerCAmelCase__ : List[str] = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , _lowercase )
original_model.to(_lowercase )
hf_model.to(_lowercase )
with torch.no_grad():
if "vicuna" in model_name:
lowerCAmelCase__ : List[Any] = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits
lowerCAmelCase__ : Union[str, Any] = hf_model(**_lowercase ).logits
else:
lowerCAmelCase__ : Dict = original_model(
{'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits
lowerCAmelCase__ : Dict = tokenizer('\n' , return_tensors='pt' ).input_ids.to(_lowercase )
lowerCAmelCase__ : List[str] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
lowerCAmelCase__ : List[Any] = hf_model(**_lowercase , labels=_lowercase ).logits
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
lowerCAmelCase__ : Any = 1e-4 if """vicuna""" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , _lowercase , atol=_lowercase )
print('Looks ok!' )
print('Generating with original model...' )
lowerCAmelCase__ : Dict = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('Generating with HF model...' )
lowerCAmelCase__ : str = hf_model.generate(
**_lowercase , do_sample=_lowercase , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowerCAmelCase__ : Optional[int] = 2
print('Original generation:' , _lowercase )
lowerCAmelCase__ : str = processor.batch_decode(_lowercase , skip_special_tokens=_lowercase )
lowerCAmelCase__ : Any = [text.strip() for text in output_text]
print('HF generation:' , _lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowercase )
hf_model.save_pretrained(_lowercase )
if push_to_hub:
processor.push_to_hub(F'''Salesforce/{model_name}''' )
hf_model.push_to_hub(F'''Salesforce/{model_name}''' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
lowerCamelCase__ = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
lowerCamelCase__ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 212 |
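
# Minimal sketch of the qkv-bias splice performed by read_in_q_v_bias above: the
# LAVIS checkpoint stores separate q and v biases (k has none), while the HF
# attention expects one fused qkv bias, so a frozen zero block stands in for k.
# `hidden_size` here is a toy value for illustration only.
import torch

hidden_size = 4
q_bias = torch.ones(hidden_size)
v_bias = torch.full((hidden_size,), 2.0)
qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
assert qkv_bias.shape == (3 * hidden_size,)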
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__a = logging.get_logger(__name__)
enable_full_determinism()
class lowerCamelCase ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = UNetaDModel
_A : Union[str, Any] = """sample"""
@property
def lowerCAmelCase_ ( self: str ) -> Tuple:
snake_case_ :List[str] = 4
snake_case_ :Tuple = 3
snake_case_ :Optional[Any] = (32, 32)
snake_case_ :str = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :Union[str, Any] = torch.tensor([10] ).to(snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self: List[str] ) -> Dict:
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
return (3, 32, 32)
def lowerCAmelCase_ ( self: Optional[int] ) -> Dict:
snake_case_ :Any = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
snake_case_ :Tuple = self.dummy_input
return init_dict, inputs_dict
class lowerCamelCase ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = UNetaDModel
_A : Union[str, Any] = """sample"""
@property
def lowerCAmelCase_ ( self: str ) -> str:
snake_case_ :List[str] = 4
snake_case_ :Optional[int] = 4
snake_case_ :int = (32, 32)
snake_case_ :Any = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :List[Any] = torch.tensor([10] ).to(snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
return (4, 32, 32)
@property
def lowerCAmelCase_ ( self: List[Any] ) -> int:
return (4, 32, 32)
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]:
snake_case_ :Dict = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
snake_case_ :List[str] = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]:
snake_case_, snake_case_ :List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(snake_case )
snake_case_ :List[str] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self: Tuple ) -> Dict:
snake_case_, snake_case_ :Union[str, Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case )
model.to(snake_case )
snake_case_ :Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self: str ) -> Any:
        # by default, model loading will use accelerate since `low_cpu_mem_usage=True`
snake_case_, snake_case_ :List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case )
model_accelerate.to(snake_case )
model_accelerate.eval()
snake_case_ :List[Any] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ :int = noise.to(snake_case )
snake_case_ :str = torch.tensor([10] * noise.shape[0] ).to(snake_case )
snake_case_ :Optional[int] = model_accelerate(snake_case , snake_case )["""sample"""]
        # the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
snake_case_, snake_case_ :str = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case , low_cpu_mem_usage=snake_case )
model_normal_load.to(snake_case )
model_normal_load.eval()
snake_case_ :int = model_normal_load(snake_case , snake_case )["""sample"""]
assert torch_all_close(snake_case , snake_case , rtol=1E-3 )
def lowerCAmelCase_ ( self: Tuple ) -> Any:
snake_case_ :Tuple = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(snake_case )
snake_case_ :Optional[int] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ :int = noise.to(snake_case )
snake_case_ :List[Any] = torch.tensor([10] * noise.shape[0] ).to(snake_case )
with torch.no_grad():
snake_case_ :Union[str, Any] = model(snake_case , snake_case ).sample
snake_case_ :Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case_ :Dict = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-3 ) )
class lowerCamelCase ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = UNetaDModel
_A : List[Any] = """sample"""
@property
def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: int=(32, 32) ) -> Tuple:
snake_case_ :Union[str, Any] = 4
snake_case_ :Any = 3
snake_case_ :int = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any:
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self: int ) -> Tuple:
return (3, 32, 32)
def lowerCAmelCase_ ( self: List[str] ) -> Tuple:
snake_case_ :List[Any] = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1E-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
snake_case_ :int = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]:
snake_case_, snake_case_ :List[Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(snake_case )
snake_case_ :Any = self.dummy_input
snake_case_ :int = floats_tensor((4, 3) + (256, 256) ).to(snake_case )
snake_case_ :int = noise
snake_case_ :int = model(**snake_case )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase_ ( self: str ) -> Dict:
snake_case_ :Dict = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(snake_case )
snake_case_ :List[str] = 4
snake_case_ :Optional[int] = 3
snake_case_ :List[str] = (256, 256)
snake_case_ :Tuple = torch.ones((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :str = torch.tensor(batch_size * [1E-4] ).to(snake_case )
with torch.no_grad():
snake_case_ :Dict = model(snake_case , snake_case ).sample
snake_case_ :int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ :Optional[Any] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) )
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
snake_case_ :Optional[Any] = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(snake_case )
snake_case_ :Optional[int] = 4
snake_case_ :Optional[Any] = 3
snake_case_ :Optional[Any] = (32, 32)
snake_case_ :Dict = torch.ones((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :Any = torch.tensor(batch_size * [1E-4] ).to(snake_case )
with torch.no_grad():
snake_case_ :str = model(snake_case , snake_case ).sample
snake_case_ :int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ :int = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) )
def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]:
# not required for this model
pass
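
# Standalone sketch of what the first test class above exercises: build the same
# tiny UNet2DModel from its dummy config (spelled UNetaDModel in the obfuscated
# code above) and run one denoising forward pass. Requires `torch` and `diffusers`.
import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    block_out_channels=(32, 64),
    attention_head_dim=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
noise = torch.randn(4, 3, 32, 32)
with torch.no_grad():
    out = model(noise, torch.tensor([10])).sample
assert out.shape == noise.shape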
| 66 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    '''simple docstring'''
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result) == sorted(expected)
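
# Independent reference implementation (union-find Kruskal) so the expected MST in
# the test above can be reproduced without the repo's `graphs` package; edges are
# [u, v, weight] triples exactly as in the test.
def kruskal_reference(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst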
| 12 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__a = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __a["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], __a)
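
# Minimal sketch of the lazy-import machinery this file relies on (a toy analogue
# of transformers' _LazyModule, not its real implementation): submodules listed in
# the import structure are only imported the first time one of their attributes is
# touched, so `import transformers` stays cheap even with torch installed.
import importlib

class _TinyLazyModule:
    def __init__(self, name, import_structure):
        self._name = name
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module[attr]  # the real class raises AttributeError on a miss
        module = importlib.import_module(f".{module_name}", self._name)
        return getattr(module, attr)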
| 66 | 0 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
a__ : Any = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
a__ : Union[str, Any] = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
a__ : Tuple = logging.get_logger(__name__)
a__ : Union[str, Any] = ''' Hello world! cécé herlolip'''
a__ : Any = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """simple docstring"""
    sd = torch.load(checkpoint_path, map_location='''cpu''' )
    hub_interface = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval()
    hub_interface.model.load_state_dict(sd['''model'''] )
    return hub_interface
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
"""simple docstring"""
if not os.path.exists(_lowercase ):
SCREAMING_SNAKE_CASE : str = torch.hub.load('''pytorch/fairseq''' , _lowercase ).eval()
else:
SCREAMING_SNAKE_CASE : List[Any] = load_xsum_checkpoint(_lowercase )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoint_path.replace('''.''' , '''-''' )
SCREAMING_SNAKE_CASE : List[Any] = BartConfig.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE : Tuple = bart.encode(_lowercase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : Any = BartTokenizer.from_pretrained(_lowercase ).encode(_lowercase , return_tensors='''pt''' ).unsqueeze(0 )
if not torch.eq(_lowercase , _lowercase ).all():
raise ValueError(
F"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}""" )
if checkpoint_path == "bart.large.mnli":
SCREAMING_SNAKE_CASE : int = bart.state_dict()
remove_ignore_keys_(_lowercase )
SCREAMING_SNAKE_CASE : int = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = BartForSequenceClassification(_lowercase ).eval()
model.load_state_dict(_lowercase )
SCREAMING_SNAKE_CASE : int = bart.predict('''mnli''' , _lowercase , return_logits=_lowercase )
SCREAMING_SNAKE_CASE : str = model(_lowercase )[0] # logits
else: # no classification heads to worry about
SCREAMING_SNAKE_CASE : Dict = bart.model.state_dict()
remove_ignore_keys_(_lowercase )
SCREAMING_SNAKE_CASE : int = state_dict["""decoder.embed_tokens.weight"""]
SCREAMING_SNAKE_CASE : str = bart.extract_features(_lowercase )
if hf_checkpoint_name == "facebook/bart-large":
SCREAMING_SNAKE_CASE : Optional[Any] = BartModel(_lowercase ).eval()
model.load_state_dict(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = model(_lowercase ).model[0]
else:
SCREAMING_SNAKE_CASE : List[Any] = BartForConditionalGeneration(_lowercase ).eval() # an existing summarization ckpt
model.model.load_state_dict(_lowercase )
if hasattr(_lowercase , '''lm_head''' ):
SCREAMING_SNAKE_CASE : Tuple = make_linear_from_emb(model.model.shared )
SCREAMING_SNAKE_CASE : Optional[int] = model.model(_lowercase )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""" )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
a__ : List[str] = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
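
# Standalone sketch of make_linear_from_emb above: the LM head is a bias-free
# linear layer sharing the (vocab_size, emb_size) embedding matrix, so logits are
# hidden_states @ embedding_table.T.
import torch
from torch import nn

emb = nn.Embedding(num_embeddings=50, embedding_dim=8)
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data
logits = lm_head(torch.randn(1, 3, emb_size))
assert logits.shape == (1, 3, vocab_size)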
| 313 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : str = StableDiffusionSAGPipeline
_A : Optional[Any] = TEXT_TO_IMAGE_PARAMS
_A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
_A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
_A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
_A : List[str] = False
def lowerCAmelCase_ ( self: Optional[Any] ) -> str:
torch.manual_seed(0 )
snake_case_ :Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
snake_case_ :Any = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=snake_case , set_alpha_to_one=snake_case , )
torch.manual_seed(0 )
snake_case_ :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
snake_case_ :Tuple = CLIPTextModel(snake_case )
snake_case_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ :Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase_ ( self: List[str] , snake_case: Tuple , snake_case: List[str]=0 ) -> str:
if str(snake_case ).startswith("""mps""" ):
snake_case_ :Tuple = torch.manual_seed(snake_case )
else:
snake_case_ :Optional[int] = torch.Generator(device=snake_case ).manual_seed(snake_case )
snake_case_ :Any = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self: Optional[int] ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: int ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self: int ) -> List[str]:
snake_case_ :Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
snake_case_ :int = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
snake_case_ :Union[str, Any] = """."""
snake_case_ :str = torch.manual_seed(0 )
snake_case_ :str = sag_pipe(
[prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
snake_case_ :List[Any] = output.images
snake_case_ :Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ :List[Any] = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCAmelCase_ ( self: Dict ) -> str:
snake_case_ :Tuple = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
snake_case_ :Optional[int] = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
snake_case_ :Tuple = """."""
snake_case_ :Union[str, Any] = torch.manual_seed(0 )
snake_case_ :Tuple = sag_pipe(
[prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
snake_case_ :Optional[int] = output.images
snake_case_ :Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ :Tuple = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCAmelCase_ ( self: List[str] ) -> List[str]:
snake_case_ :Optional[int] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
snake_case_ :int = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
snake_case_ :Tuple = """."""
snake_case_ :Optional[int] = torch.manual_seed(0 )
snake_case_ :List[str] = sag_pipe(
[prompt] , width=768 , height=512 , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
snake_case_ :Optional[Any] = output.images
assert image.shape == (1, 512, 768, 3)
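
# Minimal sketch of the slice-comparison idiom the slow tests above use: instead of
# diffing the full 512x512x3 output, compare a 3x3 corner of one channel against
# stored per-checkpoint reference values with a loose tolerance (the zeros below
# are stand-ins for real pipeline output and references).
import numpy as np

image = np.zeros((1, 512, 512, 3), dtype=np.float32)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.zeros(9, dtype=np.float32)
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2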
| 66 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
    }
}
class A__ ( PreTrainedTokenizer ):
def __init__( self , __magic_name__ , __magic_name__=False , __magic_name__=True , __magic_name__=False , __magic_name__="<s>" , __magic_name__="</s>" , __magic_name__="<unk>" , __magic_name__="<sep>" , __magic_name__="<pad>" , __magic_name__="<cls>" , __magic_name__="<mask>" , __magic_name__=["<eop>", "<eod>"] , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
lowerCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__magic_name__ , remove_space=__magic_name__ , keep_accents=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , additional_special_tokens=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
lowerCamelCase : str = 3
lowerCamelCase : List[Any] = do_lower_case
lowerCamelCase : List[str] = remove_space
lowerCamelCase : str = keep_accents
lowerCamelCase : str = vocab_file
lowerCamelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__magic_name__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
lowerCamelCase : List[str] = jieba
lowerCamelCase : str = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCamelCase__ ( self ):
return len(self.sp_model )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
lowerCamelCase : Optional[Any] = self.__dict__.copy()
lowerCamelCase : Dict = None
return state
def __setstate__( self , __magic_name__ ):
lowerCamelCase : Tuple = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase : int = {}
lowerCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self , __magic_name__ ):
if self.remove_space:
lowerCamelCase : Dict = """ """.join(inputs.strip().split() )
else:
lowerCamelCase : Any = inputs
lowerCamelCase : Optional[Any] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
lowerCamelCase : Any = unicodedata.normalize("""NFKD""" , __magic_name__ )
lowerCamelCase : int = """""".join([c for c in outputs if not unicodedata.combining(__magic_name__ )] )
if self.do_lower_case:
lowerCamelCase : Tuple = outputs.lower()
return outputs
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : str = self.preprocess_text(__magic_name__ )
lowerCamelCase : Optional[Any] = self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
lowerCamelCase : int = []
for piece in pieces:
if len(__magic_name__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowerCamelCase : List[str] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__magic_name__ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase : Any = cur_pieces[1:]
else:
lowerCamelCase : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__magic_name__ )
else:
new_pieces.append(__magic_name__ )
return new_pieces
def UpperCamelCase__ ( self , __magic_name__ ):
return self.sp_model.PieceToId(__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ ):
return self.sp_model.IdToPiece(__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : str = """""".join(__magic_name__ ).replace(__magic_name__ , """ """ ).strip()
return out_string
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
lowerCamelCase : Tuple = [self.sep_token_id]
lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
if token_ids_a is not None:
return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1, 1]
return ([0] * len(__magic_name__ )) + [1, 1]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
lowerCamelCase : List[Any] = [self.sep_token_id]
lowerCamelCase : str = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
if not os.path.isdir(__magic_name__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : List[Any] = os.path.join(
__magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __magic_name__ )
elif not os.path.isfile(self.vocab_file ):
with open(__magic_name__ , """wb""" ) as fi:
lowerCamelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (out_vocab_file,)
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
lowerCamelCase : int = super()._decode(*__magic_name__ , **__magic_name__ )
lowerCamelCase : Optional[int] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
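
# Standalone sketch of the whitespace round-trip this tokenizer builds on: spaces
# and newlines are mapped to the placeholder glyphs \u2582/\u2583 before
# SentencePiece sees the text, and _decode maps them back (it also strips the
# spaces that jieba's token join introduced).
translator = str.maketrans(" \n", "\u2582\u2583")
encoded = "你好 世界\n".translate(translator)
assert encoded == "你好\u2582世界\u2583"
decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
assert decoded == "你好 世界\n"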
| 287 |
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    '''simple docstring'''

    def __init__(self) -> None:
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node_a: str, node_b: str, probability: float) -> None:
        if node_a not in self.connections:
            self.add_node(node_a)
        if node_b not in self.connections:
            self.add_node(node_b)
        self.connections[node_a][node_b] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start, transitions, steps):
    '''simple docstring'''
    graph = MarkovChainGraphUndirectedUnweighted()
    for node_a, node_b, probability in transitions:
        graph.add_transition_probability(node_a, node_b, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
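
# Usage sketch for get_transitions above: outgoing probabilities per node should
# sum to 1; the returned Counter starts every node at 1 (it is initialised from
# graph.get_nodes()) and then counts visits along the random walk.
transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]
visits = get_transitions("a", transitions, 1_000)
print(visits)  # Counter of visits per node; the walk spends most steps on "a"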
| 66 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    lowerCamelCase_['''modeling_timesformer'''] = [
        '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimesformerModel''',
        '''TimesformerForVideoClassification''',
        '''TimesformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], lowerCamelCase_, module_spec=__spec__)
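
# For comparison, a stdlib-only sketch of the same laziness via PEP 562 module
# __getattr__ (transformers uses _LazyModule instead; this is an analogue, not the
# library's mechanism): the heavy torch-backed submodule is only imported when one
# of its names is first accessed.
import importlib

_lazy_attrs = {"TimesformerModel": ".modeling_timesformer"}

def __getattr__(name):
    if name in _lazy_attrs:
        module = importlib.import_module(_lazy_attrs[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")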
| 244 |
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__a = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__a = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__a = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
__a = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__a = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def A_ ( _lowercase ):
'''simple docstring'''
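    # Split a CamelCase name on case boundaries, e.g. "TFBertModel" -> ["TF", "Bert", "Model"].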
snake_case_ :Any = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", _lowercase )
return [m.group(0 ) for m in matches]
def A_ ( ):
'''simple docstring'''
snake_case_ :int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
snake_case_ :Dict = {
        config.replace("""Config""", """""" ): model_type for model_type, config in config_mapping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
snake_case_ :Optional[Any] = collections.defaultdict(_lowercase )
snake_case_ :int = collections.defaultdict(_lowercase )
snake_case_ :List[str] = collections.defaultdict(_lowercase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(_lowercase ):
snake_case_ :int = None
if _re_tf_models.match(_lowercase ) is not None:
snake_case_ :int = tf_models
snake_case_ :List[str] = _re_tf_models.match(_lowercase ).groups()[0]
elif _re_flax_models.match(_lowercase ) is not None:
snake_case_ :List[Any] = flax_models
snake_case_ :Any = _re_flax_models.match(_lowercase ).groups()[0]
elif _re_pt_models.match(_lowercase ) is not None:
snake_case_ :Optional[Any] = pt_models
snake_case_ :int = _re_pt_models.match(_lowercase ).groups()[0]
if lookup_dict is not None:
while len(_lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
snake_case_ :Optional[int] = True
break
# Try again after removing the last word in the name
snake_case_ :Optional[Any] = """""".join(camel_case_split(_lowercase )[:-1] )
snake_case_ :Optional[int] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
snake_case_ :Optional[Any] = list(_lowercase )
all_models.sort()
snake_case_ :Optional[int] = {"""model_type""": all_models}
snake_case_ :Optional[int] = [pt_models[t] for t in all_models]
snake_case_ :Any = [tf_models[t] for t in all_models]
snake_case_ :Dict = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
snake_case_ :Dict = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
snake_case_ :Optional[Any] = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
snake_case_ :Tuple = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
snake_case_ :Tuple = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
snake_case_ :str = """AutoTokenizer"""
snake_case_ :int = [processors[t] for t in all_models]
return pd.DataFrame(_lowercase )
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :List[Any] = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
snake_case_ :Optional[int] = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
snake_case_ :List[str] = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(_lowercase, _lowercase, _lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(_lowercase, _lowercase ):
continue
# First extract all model_names
snake_case_ :Tuple = []
for name in getattr(_lowercase, _lowercase ).values():
if isinstance(_lowercase, _lowercase ):
model_names.append(_lowercase )
else:
model_names.extend(list(_lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[Any] = get_frameworks_table()
snake_case_ :str = Dataset.from_pandas(_lowercase )
snake_case_ :List[Any] = hf_hub_download(
"""huggingface/transformers-metadata""", """pipeline_tags.json""", repo_type="""dataset""", token=_lowercase )
snake_case_ :List[str] = Dataset.from_json(_lowercase )
snake_case_ :int = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(_lowercase ) )
}
snake_case_ :Optional[int] = update_pipeline_and_auto_class_table(_lowercase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
snake_case_ :Tuple = sorted(table.keys() )
snake_case_ :Tuple = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
snake_case_ :Union[str, Any] = Dataset.from_pandas(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(_lowercase, """frameworks.json""" ) )
tags_dataset.to_json(os.path.join(_lowercase, """pipeline_tags.json""" ) )
if commit_sha is not None:
snake_case_ :Union[str, Any] = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
snake_case_ :List[Any] = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""", folder_path=_lowercase, repo_type="""dataset""", token=_lowercase, commit_message=_lowercase, )
def A_ ( ):
'''simple docstring'''
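    # Verify that every pipeline task supported by `transformers.pipelines` has an entry
    # in PIPELINE_TAGS_AND_AUTO_MODELS, and report any tasks that are missing.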
snake_case_ :List[Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
snake_case_ :Dict = transformers_module.pipelines.SUPPORTED_TASKS
snake_case_ :List[str] = []
for key in pipeline_tasks:
if key not in in_table:
snake_case_ :int = pipeline_tasks[key]["""pt"""]
if isinstance(_lowercase, (list, tuple) ):
snake_case_ :Any = model[0]
snake_case_ :str = model.__name__
if model not in in_table.values():
missing.append(_lowercase )
if len(_lowercase ) > 0:
snake_case_ :Optional[int] = """, """.join(_lowercase )
raise ValueError(
"""The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__a = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
"""simple docstring"""
from math import factorial
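# Forward-mode automatic differentiation with dual numbers: each Dual carries a real
# value plus higher-order coefficients, and arithmetic on Dual(x, 1) propagates the
# derivatives of every function applied to it.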
class __A :
"""simple docstring"""
def __init__( self , __A , __A ) -> Tuple:
a =real
if isinstance(__A , __A ):
a =[1] * rank
else:
a =rank
def __repr__( self ) -> Tuple:
return (
f'''{self.real}+'''
f'''{"+".join(str(__A )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'''
)
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , __A )
def __add__( self , __A ) -> List[str]:
if not isinstance(__A , __A ):
return Dual(self.real + other , self.duals )
a =self.duals.copy()
a =other.duals.copy()
if len(__A ) > len(__A ):
o_dual.extend([1] * (len(__A ) - len(__A )) )
elif len(__A ) < len(__A ):
s_dual.extend([1] * (len(__A ) - len(__A )) )
a =[]
for i in range(len(__A ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , __A )
__lowerCAmelCase = __add__
def __sub__( self , __A ) -> Tuple:
return self + other * -1
def __mul__( self , __A ) -> Optional[Any]:
if not isinstance(__A , __A ):
a =[]
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , __A )
a =[0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , __A )
__lowerCAmelCase = __mul__
def __truediv__( self , __A ) -> List[str]:
if not isinstance(__A , __A ):
a =[]
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , __A )
raise ValueError
def __floordiv__( self , __A ) -> Any:
if not isinstance(__A , __A ):
a =[]
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , __A )
raise ValueError
def __pow__( self , __A ) -> List[Any]:
if n < 0 or isinstance(__A , __A ):
raise ValueError('''power must be a positive integer''' )
if n == 0:
return 1
if n == 1:
return self
a =self
for _ in range(n - 1 ):
x *= self
return x
def _A ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if not callable(_lowercase ):
raise ValueError('''differentiate() requires a function as input for func''' )
if not isinstance(_lowercase , (float, int) ):
raise ValueError('''differentiate() requires a float as input for position''' )
if not isinstance(_lowercase , _lowercase ):
raise ValueError('''differentiate() requires an int as input for order''' )
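    # Seed a dual number with first-order coefficient 1; after evaluating func, the
    # n-th dual coefficient holds f^(n)(x)/n!, so multiply by order! to recover f^(n)(x).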
a =Dual(_lowercase , 1 )
a =func(_lowercase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _A ( lowercase ):
"""simple docstring"""
return y**2 * y**4
print(differentiate(f, 9, 2))
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__a = logging.getLogger(__name__)
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : Union[str, Any] = """token-classification"""
def __init__( self: Any , snake_case: Tuple ) -> List[Any]:
if type(snake_case ) == dict:
snake_case_ :Optional[int] = Namespace(**snake_case )
snake_case_ :Optional[int] = import_module("""tasks""" )
try:
snake_case_ :Any = getattr(snake_case , hparams.task_type )
snake_case_ :TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
snake_case_ :Any = self.token_classification_task.get_labels(hparams.labels )
snake_case_ :str = CrossEntropyLoss().ignore_index
super().__init__(snake_case , len(self.labels ) , self.mode )
def lowerCAmelCase_ ( self: Dict , **snake_case: List[Any] ) -> Any:
return self.model(**snake_case )
def lowerCAmelCase_ ( self: str , snake_case: Tuple , snake_case: List[Any] ) -> Optional[int]:
snake_case_ :List[str] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
snake_case_ :List[str] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don't use token_type_ids
snake_case_ :Optional[Any] = self(**snake_case )
snake_case_ :List[str] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def lowerCAmelCase_ ( self: int ) -> Dict:
snake_case_ :List[Any] = self.hparams
for mode in ["train", "dev", "test"]:
snake_case_ :Optional[int] = self._feature_file(snake_case )
if os.path.exists(snake_case ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , snake_case )
snake_case_ :Optional[int] = torch.load(snake_case )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
snake_case_ :Optional[int] = self.token_classification_task.read_examples_from_file(args.data_dir , snake_case )
snake_case_ :Any = self.token_classification_task.convert_examples_to_features(
                    snake_case ,
                    self.labels ,
                    args.max_seq_length ,
                    self.tokenizer ,
                    cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) ,
                    cls_token=self.tokenizer.cls_token ,
                    cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 ,
                    sep_token=self.tokenizer.sep_token ,
                    sep_token_extra=snake_case ,
                    pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) ,
                    pad_token=self.tokenizer.pad_token_id ,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id ,
                    pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , snake_case )
torch.save(snake_case , snake_case )
def lowerCAmelCase_ ( self: Optional[int] , snake_case: int , snake_case: int , snake_case: bool = False ) -> DataLoader:
snake_case_ :int = self._feature_file(snake_case )
logger.info("""Loading features from cached file %s""" , snake_case )
snake_case_ :str = torch.load(snake_case )
snake_case_ :Dict = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
snake_case_ :str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
snake_case_ :List[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
snake_case_ :List[str] = torch.tensor([0 for f in features] , dtype=torch.long )
        # HACK: we will not need this for much longer
snake_case_ :Any = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(snake_case , snake_case , snake_case , snake_case ) , batch_size=snake_case )
def lowerCAmelCase_ ( self: List[str] , snake_case: Dict , snake_case: Union[str, Any] ) -> List[str]:
"""Compute validation""" ""
snake_case_ :List[str] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
snake_case_ :Dict = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don't use token_type_ids
snake_case_ :Dict = self(**snake_case )
snake_case_, snake_case_ :Dict = outputs[:2]
snake_case_ :Union[str, Any] = logits.detach().cpu().numpy()
snake_case_ :List[Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCAmelCase_ ( self: List[Any] , snake_case: int ) -> Tuple:
snake_case_ :Union[str, Any] = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
snake_case_ :Tuple = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
snake_case_ :Tuple = np.argmax(snake_case , axis=2 )
snake_case_ :List[str] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
snake_case_ :Optional[Any] = dict(enumerate(self.labels ) )
snake_case_ :Dict = [[] for _ in range(out_label_ids.shape[0] )]
snake_case_ :Dict = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
snake_case_ :str = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(snake_case , snake_case ),
"""precision""": precision_score(snake_case , snake_case ),
"""recall""": recall_score(snake_case , snake_case ),
"""f1""": fa_score(snake_case , snake_case ),
}
snake_case_ :List[Any] = dict(results.items() )
snake_case_ :Union[str, Any] = results
return ret, preds_list, out_label_list
def lowerCAmelCase_ ( self: Optional[Any] , snake_case: Dict ) -> Optional[Any]:
# when stable
snake_case_, snake_case_, snake_case_ :Tuple = self._eval_end(snake_case )
snake_case_ :str = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCAmelCase_ ( self: Tuple , snake_case: Optional[int] ) -> Any:
# updating to test_epoch_end instead of deprecated test_end
snake_case_, snake_case_, snake_case_ :Any = self._eval_end(snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
snake_case_ :Optional[int] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCAmelCase_ ( snake_case: Any , snake_case: int ) -> Dict:
# Add NER specific options
BaseTransformer.add_model_specific_args(snake_case , snake_case )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=snake_case , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=snake_case , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=snake_case , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=snake_case , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
__a = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__a = NERTransformer.add_model_specific_args(parser, os.getcwd())
__a = parser.parse_args()
__a = NERTransformer(args)
__a = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__a = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
__a = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
A__: Dict = True
except (ImportError, AttributeError):
A__: List[str] = object
def SCREAMING_SNAKE_CASE_ ( *_UpperCAmelCase : int ,**_UpperCAmelCase : Dict ) -> str:
pass
A__: str = False
A__: Tuple = logging.get_logger('''transformers-cli/serving''')
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ) -> List[Any]:
_a : Optional[Any] =pipeline(
task=args.task ,model=args.model if args.model else None ,config=args.config ,tokenizer=args.tokenizer ,device=args.device ,)
return ServeCommand(_lowercase ,args.host ,args.port ,args.workers )
class A__ ( _lowerCAmelCase ):
__UpperCamelCase : dict
class A__ ( _lowerCAmelCase ):
__UpperCamelCase : List[str]
__UpperCamelCase : Optional[List[int]]
class A__ ( _lowerCAmelCase ):
__UpperCamelCase : str
class A__ ( _lowerCAmelCase ):
__UpperCamelCase : Any
class A__ ( _lowerCAmelCase ):
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :ArgumentParser ) -> Tuple:
'''simple docstring'''
_a : Any =parser.add_parser(
"""serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
serve_parser.add_argument(
"""--task""" , type=SCREAMING_SNAKE_CASE , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
serve_parser.add_argument("""--host""" , type=SCREAMING_SNAKE_CASE , default="""localhost""" , help="""Interface the server will listen on.""" )
serve_parser.add_argument("""--port""" , type=SCREAMING_SNAKE_CASE , default=8_8_8_8 , help="""Port the serving will listen to.""" )
serve_parser.add_argument("""--workers""" , type=SCREAMING_SNAKE_CASE , default=1 , help="""Number of http workers""" )
serve_parser.add_argument("""--model""" , type=SCREAMING_SNAKE_CASE , help="""Model's name or path to stored model.""" )
serve_parser.add_argument("""--config""" , type=SCREAMING_SNAKE_CASE , help="""Model's config name or path to stored model.""" )
serve_parser.add_argument("""--tokenizer""" , type=SCREAMING_SNAKE_CASE , help="""Tokenizer name to use.""" )
serve_parser.add_argument(
"""--device""" , type=SCREAMING_SNAKE_CASE , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
serve_parser.set_defaults(func=SCREAMING_SNAKE_CASE )
def __init__( self :int , SCREAMING_SNAKE_CASE :Pipeline , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int ) -> List[Any]:
'''simple docstring'''
_a : Optional[Any] =pipeline
_a : Optional[Any] =host
_a : Optional[Any] =port
_a : Tuple =workers
if not _serve_dependencies_installed:
raise RuntimeError(
"""Using serve command requires FastAPI and uvicorn. """
"""Please install transformers with [serving]: pip install \"transformers[serving]\"."""
"""Or install FastAPI and uvicorn separately.""" )
else:
logger.info(f"Serving model over {host}:{port}" )
_a : List[str] =FastAPI(
routes=[
APIRoute(
"""/""" , self.model_info , response_model=SCREAMING_SNAKE_CASE , response_class=SCREAMING_SNAKE_CASE , methods=["""GET"""] , ),
APIRoute(
"""/tokenize""" , self.tokenize , response_model=SCREAMING_SNAKE_CASE , response_class=SCREAMING_SNAKE_CASE , methods=["""POST"""] , ),
APIRoute(
"""/detokenize""" , self.detokenize , response_model=SCREAMING_SNAKE_CASE , response_class=SCREAMING_SNAKE_CASE , methods=["""POST"""] , ),
APIRoute(
"""/forward""" , self.forward , response_model=SCREAMING_SNAKE_CASE , response_class=SCREAMING_SNAKE_CASE , methods=["""POST"""] , ),
] , timeout=6_0_0 , )
def __UpperCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
run(self._app , host=self.host , port=self.port , workers=self.workers )
def __UpperCAmelCase ( self :Any ) -> Any:
'''simple docstring'''
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str = Body(SCREAMING_SNAKE_CASE , embed=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE :bool = Body(SCREAMING_SNAKE_CASE , embed=SCREAMING_SNAKE_CASE ) ) -> Union[str, Any]:
'''simple docstring'''
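        # Tokenize the input text; when `return_ids` is set, also map the tokens to vocabulary ids.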
try:
_a : Dict =self._pipeline.tokenizer.tokenize(SCREAMING_SNAKE_CASE )
if return_ids:
_a : int =self._pipeline.tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
return ServeTokenizeResult(tokens=SCREAMING_SNAKE_CASE , tokens_ids=SCREAMING_SNAKE_CASE )
else:
return ServeTokenizeResult(tokens=SCREAMING_SNAKE_CASE )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={"""model""": """""", """error""": str(SCREAMING_SNAKE_CASE )} )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :List[int] = Body(SCREAMING_SNAKE_CASE , embed=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE :bool = Body(SCREAMING_SNAKE_CASE , embed=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE :bool = Body(SCREAMING_SNAKE_CASE , embed=SCREAMING_SNAKE_CASE ) , ) -> Union[str, Any]:
'''simple docstring'''
try:
_a : Dict =self._pipeline.tokenizer.decode(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return ServeDeTokenizeResult(model="""""" , text=SCREAMING_SNAKE_CASE )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={"""model""": """""", """error""": str(SCREAMING_SNAKE_CASE )} )
async def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Optional[int]=Body(SCREAMING_SNAKE_CASE , embed=SCREAMING_SNAKE_CASE ) ) -> Union[str, Any]:
'''simple docstring'''
# Check we don't have empty string
if len(SCREAMING_SNAKE_CASE ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
_a : List[str] =self._pipeline(SCREAMING_SNAKE_CASE )
return ServeForwardResult(output=SCREAMING_SNAKE_CASE )
except Exception as e:
raise HTTPException(5_0_0 , {"""error""": str(SCREAMING_SNAKE_CASE )} )
"""simple docstring"""
from math import factorial
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Optional[int] , snake_case: Dict , snake_case: int ) -> Tuple:
snake_case_ :List[Any] = real
if isinstance(snake_case , snake_case ):
snake_case_ :Tuple = [1] * rank
else:
snake_case_ :Optional[Any] = rank
def __repr__( self: List[str] ) -> Tuple:
return (
f"""{self.real}+"""
f"""{'+'.join(str(snake_case )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
snake_case_ :Any = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , snake_case )
def __add__( self: Optional[int] , snake_case: Dict ) -> List[str]:
if not isinstance(snake_case , snake_case ):
return Dual(self.real + other , self.duals )
snake_case_ :List[Any] = self.duals.copy()
snake_case_ :Tuple = other.duals.copy()
if len(snake_case ) > len(snake_case ):
o_dual.extend([1] * (len(snake_case ) - len(snake_case )) )
elif len(snake_case ) < len(snake_case ):
s_dual.extend([1] * (len(snake_case ) - len(snake_case )) )
snake_case_ :Dict = []
for i in range(len(snake_case ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , snake_case )
_A : str = __add__
def __sub__( self: Tuple , snake_case: Union[str, Any] ) -> Tuple:
return self + other * -1
def __mul__( self: str , snake_case: Tuple ) -> Optional[Any]:
if not isinstance(snake_case , snake_case ):
snake_case_ :Dict = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , snake_case )
snake_case_ :int = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , snake_case )
_A : int = __mul__
def __truediv__( self: List[str] , snake_case: List[str] ) -> List[str]:
if not isinstance(snake_case , snake_case ):
snake_case_ :Optional[Any] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , snake_case )
raise ValueError
def __floordiv__( self: int , snake_case: List[Any] ) -> Any:
if not isinstance(snake_case , snake_case ):
snake_case_ :Optional[int] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , snake_case )
raise ValueError
def __pow__( self: Optional[Any] , snake_case: Optional[int] ) -> List[Any]:
if n < 0 or isinstance(snake_case , snake_case ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
snake_case_ :str = self
for _ in range(n - 1 ):
x *= self
return x
def A_ ( _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
if not callable(_lowercase ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(_lowercase, (float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(_lowercase, _lowercase ):
raise ValueError("""differentiate() requires an int as input for order""" )
snake_case_ :Optional[Any] = Dual(_lowercase, 1 )
snake_case_ :List[Any] = func(_lowercase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def A_ ( _lowercase ):
'''simple docstring'''
return y**2 * y**4
print(differentiate(f, 9, 2))
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__a = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def __UpperCAmelCase ( a_: List[str] ):
_UpperCAmelCase : Optional[Any] = list(s_dict.keys() )
for key in keys:
_UpperCAmelCase : Dict = r""".*/layers_(\d+)"""
_UpperCAmelCase : Dict = key
if re.match(_lowercase, _lowercase ):
_UpperCAmelCase : Dict = re.sub(r"layers_(\d+)", r"block/\1/layer", _lowercase )
_UpperCAmelCase : List[Any] = r"""(encoder|decoder)\/"""
if re.match(_lowercase, _lowercase ):
_UpperCAmelCase : Any = re.match(_lowercase, _lowercase ).groups()
if groups[0] == "encoder":
_UpperCAmelCase : str = re.sub(r"/mlp/", r"/1/mlp/", _lowercase )
_UpperCAmelCase : Optional[Any] = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", _lowercase )
elif groups[0] == "decoder":
_UpperCAmelCase : int = re.sub(r"/mlp/", r"/2/mlp/", _lowercase )
_UpperCAmelCase : Optional[Any] = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", _lowercase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_UpperCAmelCase : Union[str, Any] = new_key.replace(_lowercase, _lowercase )
print(f"""{key} -> {new_key}""" )
_UpperCAmelCase : List[Any] = s_dict.pop(_lowercase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_UpperCAmelCase : List[Any] = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_UpperCAmelCase : Tuple = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
_UpperCAmelCase : Tuple = s_dict[key].shape[0]
_UpperCAmelCase : int = s_dict[key]
for idx in range(_lowercase ):
                _UpperCAmelCase : Union[str, Any] = expert_weights[idx]
                print(f"""{key} -> {key.replace('expert/', f'experts.expert_{idx}/' )}""" )
s_dict.pop(_lowercase )
return s_dict
__a = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def __UpperCAmelCase ( a_: Optional[Any], a_: Union[str, Any] ):
import regex as re
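    # Parse `param = value` pairs out of the gin file and map them onto
    # SwitchTransformersConfig fields via GIN_TO_CONFIG_MAPPING.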
with open(_lowercase, "r" ) as f:
_UpperCAmelCase : Tuple = f.read()
_UpperCAmelCase : Dict = re.findall(r"(.*) = ([0-9.]*)", _lowercase )
_UpperCAmelCase : Union[str, Any] = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_UpperCAmelCase : int = float(_lowercase ) if """.""" in value else int(_lowercase )
_UpperCAmelCase : Dict = re.findall(r"(.*activations) = \(\'(.*)\',\)", _lowercase )[0]
_UpperCAmelCase : Optional[int] = str(activation[1] )
_UpperCAmelCase : Dict = num_experts
_UpperCAmelCase : int = SwitchTransformersConfig(**_lowercase )
return config
def __UpperCAmelCase ( a_: Union[str, Any], a_: Union[str, Any], a_: List[str]=None, a_: Optional[Any]="./", a_: Tuple=8 ):
print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
_UpperCAmelCase : List[Any] = checkpoints.load_tax_checkpoint(_lowercase )
if gin_file is not None:
_UpperCAmelCase : Dict = convert_gin_to_config(_lowercase, _lowercase )
else:
_UpperCAmelCase : Optional[Any] = SwitchTransformersConfig.from_pretrained(_lowercase )
_UpperCAmelCase : Union[str, Any] = SwitchTransformersForConditionalGeneration(_lowercase )
_UpperCAmelCase : Tuple = flax_params["""target"""]
_UpperCAmelCase : Any = flatten_dict(_lowercase, sep="/" )
_UpperCAmelCase : int = rename_keys(_lowercase )
_UpperCAmelCase : List[Any] = unflatten_dict(_lowercase, sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(_lowercase, _lowercase )
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(_lowercase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
__a = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
    )
"""simple docstring"""
from __future__ import annotations
__a = 10
def A_ ( _lowercase ):
'''simple docstring'''
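    # LSD radix sort: bucket the values by each base-10 digit, least significant
    # digit first, rebuilding the list after every pass.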
snake_case_ :Union[str, Any] = 1
snake_case_ :List[str] = max(_lowercase )
while placement <= max_digit:
# declare and initialize empty buckets
snake_case_ :list[list] = [[] for _ in range(_lowercase )]
# split list_of_ints between the buckets
for i in list_of_ints:
snake_case_ :Any = int((i / placement) % RADIX )
buckets[tmp].append(_lowercase )
# put each buckets' contents into list_of_ints
snake_case_ :Optional[Any] = 0
for b in range(_lowercase ):
for i in buckets[b]:
snake_case_ :Union[str, Any] = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
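    # Kinetic energy = 1/2 * m * v^2; abs() makes the sign of the velocity irrelevant.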
if mass < 0:
raise ValueError('''The mass of a body cannot be negative''' )
return 0.5 * mass * abs(_lowercase ) * abs(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math
class snake_case__:
'''simple docstring'''
def lowercase_ ( self , __lowercase , __lowercase ) -> int:
lowerCAmelCase_ : Any = 0.0
lowerCAmelCase_ : Tuple = 0.0
for i in range(len(__lowercase ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
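    # Move the winning weight vector towards the sample, scaled by the learning rate alpha.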
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> list[list[int | float]]:
for i in range(len(__lowercase ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def lowerCAmelCase ( )-> int:
lowerCAmelCase_ : Dict = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
lowerCAmelCase_ : List[Any] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
lowerCAmelCase_ : Optional[Any] = SelfOrganizingMap()
lowerCAmelCase_ : Dict = 3
lowerCAmelCase_ : Dict = 0.5
for _ in range(_lowercase ):
for j in range(len(_lowercase ) ):
# training sample
lowerCAmelCase_ : List[Any] = training_samples[j]
# Compute the winning vector
lowerCAmelCase_ : Optional[int] = self_organizing_map.get_winner(_lowercase , _lowercase )
# Update the winning vector
lowerCAmelCase_ : List[str] = self_organizing_map.update(_lowercase , _lowercase , _lowercase , _lowercase )
# classify test sample
lowerCAmelCase_ : str = [0, 0, 0, 1]
lowerCAmelCase_ : List[Any] = self_organizing_map.get_winner(_lowercase , _lowercase )
# results
print(f"""Clusters that the test sample belongs to : {winner}""" )
print(f"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
    main()
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: List[Any] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase_ ( self: Tuple ) -> Any:
snake_case_, snake_case_ :List[str] = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-canny""" , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_, snake_case_ :Union[str, Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_ :Union[str, Any] = controlnet_params
snake_case_ :Union[str, Any] = """bird"""
snake_case_ :List[Any] = jax.device_count()
snake_case_ :List[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case_ :List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
snake_case_ :List[str] = pipe.prepare_image_inputs([canny_image] * num_samples )
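        # Replicate the parameters across devices and shard the inputs so the
        # jitted pipeline runs data-parallel on every available device.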
snake_case_ :Any = jax.random.PRNGKey(0 )
snake_case_ :List[str] = jax.random.split(snake_case , jax.device_count() )
snake_case_ :List[Any] = replicate(snake_case )
snake_case_ :List[str] = shard(snake_case )
snake_case_ :str = shard(snake_case )
snake_case_ :Dict = pipe(
prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ :Union[str, Any] = images[0, 253:256, 253:256, -1]
snake_case_ :str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ :Dict = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self: int ) -> Dict:
snake_case_, snake_case_ :List[Any] = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-openpose""" , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_, snake_case_ :int = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_ :str = controlnet_params
snake_case_ :Optional[int] = """Chef in the kitchen"""
snake_case_ :Union[str, Any] = jax.device_count()
snake_case_ :Any = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case_ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
snake_case_ :Optional[Any] = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case_ :str = jax.random.PRNGKey(0 )
snake_case_ :str = jax.random.split(snake_case , jax.device_count() )
snake_case_ :Tuple = replicate(snake_case )
snake_case_ :str = shard(snake_case )
snake_case_ :int = shard(snake_case )
snake_case_ :List[str] = pipe(
prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ :int = images[0, 253:256, 253:256, -1]
snake_case_ :Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ :Optional[int] = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __snake_case ( ):
"""simple docstring"""
A_ = []
A_ = 1
while len(_lowercase ) < 1E6:
constant.append(str(_lowercase ) )
i += 1
A_ = """""".join(_lowercase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
    print(solution())
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
lowerCAmelCase__ : int = XCLIPTextConfig()
# derive patch size from model name
lowerCAmelCase__ : Union[str, Any] = model_name.find('patch' )
lowerCAmelCase__ : List[str] = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
lowerCAmelCase__ : Any = XCLIPVisionConfig(patch_size=_lowercase , num_frames=_lowercase )
if "large" in model_name:
lowerCAmelCase__ : Optional[Any] = 768
lowerCAmelCase__ : Union[str, Any] = 3_072
lowerCAmelCase__ : Any = 12
lowerCAmelCase__ : Any = 1_024
lowerCAmelCase__ : str = 4_096
lowerCAmelCase__ : Union[str, Any] = 16
lowerCAmelCase__ : Union[str, Any] = 24
lowerCAmelCase__ : Tuple = 768
lowerCAmelCase__ : Any = 3_072
if model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase__ : Any = 336
lowerCAmelCase__ : Any = XCLIPConfig.from_text_vision_configs(_lowercase , _lowercase )
if "large" in model_name:
lowerCAmelCase__ : List[Any] = 768
return config
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if name == "token_embedding.weight":
lowerCAmelCase__ : Optional[Any] = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
lowerCAmelCase__ : Tuple = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
lowerCAmelCase__ : Dict = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
lowerCAmelCase__ : str = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
lowerCAmelCase__ : str = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
lowerCAmelCase__ : int = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
lowerCAmelCase__ : Union[str, Any] = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
lowerCAmelCase__ : Any = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
lowerCAmelCase__ : Optional[int] = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
lowerCAmelCase__ : Union[str, Any] = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
lowerCAmelCase__ : int = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
lowerCAmelCase__ : Any = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
lowerCAmelCase__ : str = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
lowerCAmelCase__ : Dict = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
lowerCAmelCase__ : List[str] = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
lowerCAmelCase__ : Dict = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
lowerCAmelCase__ : str = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
lowerCAmelCase__ : Dict = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
lowerCAmelCase__ : Union[str, Any] = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ : Dict = orig_state_dict.pop(_lowercase )
if "attn.in_proj" in key:
lowerCAmelCase__ : Optional[Any] = key.split('.' )
if key.startswith('visual' ):
lowerCAmelCase__ : Any = key_split[3]
lowerCAmelCase__ : Optional[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase__ : str = val[
:dim, :
]
lowerCAmelCase__ : Optional[int] = val[
dim : dim * 2, :
]
lowerCAmelCase__ : Union[str, Any] = val[
-dim:, :
]
else:
lowerCAmelCase__ : Dict = val[
:dim
]
lowerCAmelCase__ : Optional[int] = val[
dim : dim * 2
]
lowerCAmelCase__ : Optional[int] = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase__ : Optional[Any] = val[
:dim, :
]
lowerCAmelCase__ : List[str] = val[
dim : dim * 2, :
]
lowerCAmelCase__ : Dict = val[
-dim:, :
]
else:
lowerCAmelCase__ : Union[str, Any] = val[:dim]
lowerCAmelCase__ : Union[str, Any] = val[
dim : dim * 2
]
lowerCAmelCase__ : Union[str, Any] = val[-dim:]
elif key.startswith('mit' ):
lowerCAmelCase__ : Tuple = key_split[2]
lowerCAmelCase__ : Union[str, Any] = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase__ : Optional[int] = val[:dim, :]
lowerCAmelCase__ : Optional[int] = val[dim : dim * 2, :]
lowerCAmelCase__ : str = val[-dim:, :]
else:
lowerCAmelCase__ : str = val[:dim]
lowerCAmelCase__ : Any = val[dim : dim * 2]
lowerCAmelCase__ : int = val[-dim:]
else:
lowerCAmelCase__ : Tuple = key_split[2]
lowerCAmelCase__ : Any = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ : Dict = val[:dim, :]
lowerCAmelCase__ : Dict = val[
dim : dim * 2, :
]
lowerCAmelCase__ : List[str] = val[-dim:, :]
else:
lowerCAmelCase__ : Any = val[:dim]
lowerCAmelCase__ : Tuple = val[
dim : dim * 2
]
lowerCAmelCase__ : List[str] = val[-dim:]
else:
lowerCAmelCase__ : Optional[int] = rename_key(_lowercase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowerCAmelCase__ : Optional[Any] = val.T
lowerCAmelCase__ : Tuple = val
return orig_state_dict
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
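    # Download a sample "eating spaghetti" clip, stored as a numpy array of frames,
    # from the Hub and return it as a list of frames.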
if num_frames == 8:
lowerCAmelCase__ : str = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
lowerCAmelCase__ : int = """eating_spaghetti.npy"""
elif num_frames == 32:
lowerCAmelCase__ : List[str] = """eating_spaghetti_32_frames.npy"""
lowerCAmelCase__ : int = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=_lowercase , repo_type='dataset' , )
lowerCAmelCase__ : Union[str, Any] = np.load(_lowercase )
return list(_lowercase )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> Dict:
lowerCAmelCase__ : List[Any] = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
lowerCAmelCase__ : Optional[int] = model_to_url[model_name]
lowerCAmelCase__ : int = 8
if "16-frames" in model_name:
lowerCAmelCase__ : List[Any] = 16
elif "shot" in model_name:
lowerCAmelCase__ : Dict = 32
lowerCAmelCase__ : Optional[int] = get_xclip_config(_lowercase , _lowercase )
lowerCAmelCase__ : Optional[Any] = XCLIPModel(_lowercase )
model.eval()
if "drive" in checkpoint_url:
lowerCAmelCase__ : List[str] = """pytorch_model.bin"""
gdown.cached_download(_lowercase , _lowercase , quiet=_lowercase )
lowerCAmelCase__ : List[Any] = torch.load(_lowercase , map_location='cpu' )["""model"""]
else:
lowerCAmelCase__ : Tuple = torch.hub.load_state_dict_from_url(_lowercase )["""model"""]
lowerCAmelCase__ : Union[str, Any] = convert_state_dict(_lowercase , _lowercase )
lowerCAmelCase__ : str = XCLIPModel(_lowercase )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = model.load_state_dict(_lowercase , strict=_lowercase ) # unpack (missing_keys, unexpected_keys)
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
lowerCAmelCase__ : List[str] = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
lowerCAmelCase__ : List[Any] = VideoMAEImageProcessor(size=_lowercase )
lowerCAmelCase__ : Any = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase__ : str = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase__ : Optional[Any] = XCLIPProcessor(image_processor=_lowercase , tokenizer=_lowercase )
lowerCAmelCase__ : Optional[int] = prepare_video(_lowercase )
lowerCAmelCase__ : Optional[Any] = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=_lowercase , return_tensors='pt' , padding=_lowercase )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(**_lowercase )
# Verify outputs
lowerCAmelCase__ : List[Any] = outputs.logits_per_video
lowerCAmelCase__ : Any = logits_per_video.softmax(dim=1 )
print('Probs:' , _lowercase )
# kinetics-400
if model_name == "xclip-base-patch32":
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
lowerCAmelCase__ : str = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
lowerCAmelCase__ : Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
lowerCAmelCase__ : Any = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
lowerCAmelCase__ : str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase__ : Tuple = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowerCAmelCase__ : List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowerCAmelCase__ : List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowerCAmelCase__ : Dict = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowerCAmelCase__ : str = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowerCAmelCase__ : str = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowerCAmelCase__ : int = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowerCAmelCase__ : Optional[int] = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowerCAmelCase__ : Any = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowerCAmelCase__ : Tuple = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(_lowercase , organization='nielsr' )
processor.push_to_hub(_lowercase , organization='nielsr' )
slow_tokenizer.push_to_hub(_lowercase , organization='nielsr' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase__ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 212 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Union[str, Any] = os.path.join(args.tf_model_dir, """parameters.json""" )
snake_case_ :Any = json.loads(open(_lowercase ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(""".pt""" ):
snake_case_ :Optional[int] = args.output + """.pt"""
snake_case_ :List[str] = OrderedDict()
with tf.device("""/CPU:0""" ):
snake_case_ :Dict = tf.train.load_checkpoint(args.tf_model_dir )
snake_case_ :str = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
snake_case_ :List[Any] = reader.get_tensor(_lowercase ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
snake_case_ :Any = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
snake_case_ :Optional[int] = 8
snake_case_ :List[str] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow stores the kernel transposed relative to PyTorch
snake_case_ :List[str] = torch.tensor(_lowercase )
elif key_name.startswith("""model/moe""" ):
snake_case_ :Tuple = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow stores the kernel transposed relative to PyTorch
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/softmlp/kernel""" ):
snake_case_ :List[Any] = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
snake_case_ :Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow stores the kernel transposed relative to PyTorch
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
snake_case_ :Dict = key_name[-9:-7]
for i in range(16 ):
snake_case_ :str = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
snake_case_ :Tuple = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-TensorFlow the experts are packed into one array, so split it per expert
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("""model/mlp""" ):
snake_case_ :Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow stores the kernel transposed relative to PyTorch
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/p1/bias""" ):
snake_case_ :List[Any] = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
snake_case_ :str = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/p2/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow stores the kernel transposed relative to PyTorch
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.endswith("""/p2/bias""" ):
snake_case_ :Dict = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
snake_case_ :Any = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("""model/ln""" ):
snake_case_ :Union[str, Any] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
snake_case_ :str = """model.blocks.%d.feed_forward.norm.bias""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :int = torch.tensor(_lowercase )
elif key_name.endswith("""/g""" ):
snake_case_ :Dict = """model.blocks.%d.feed_forward.norm.weight""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.startswith("""model/att""" ):
snake_case_ :List[str] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
snake_case_ :Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
snake_case_ :Dict = state[:, 0, :, :]
snake_case_ :int = state[:, 1, :, :]
snake_case_ :List[str] = state[:, 2, :, :]
snake_case_ :str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-TensorFlow stores the kernel transposed relative to PyTorch
snake_case_ :Any = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-TensorFlow stores the kernel transposed relative to PyTorch
snake_case_ :Optional[int] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-TensorFlow stores the kernel transposed relative to PyTorch
snake_case_ :int = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
snake_case_ :int = torch.tensor(_lowercase )
snake_case_ :Optional[Any] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
snake_case_ :Dict = torch.tensor(_lowercase )
snake_case_ :Dict = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/o/kernel""" ):
snake_case_ :str = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
snake_case_ :str = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-TensorFlow stores the kernel transposed relative to PyTorch
snake_case_ :Any = torch.tensor(_lowercase )
elif key_name.startswith("""model/an""" ):
snake_case_ :Optional[int] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
snake_case_ :Any = """model.blocks.%d.self_attn.norm.bias""" % player
snake_case_ :Optional[int] = vnp.copy() # same because it is one dimensional
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.endswith("""/g""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.self_attn.norm.weight""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
snake_case_ :List[Any] = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
snake_case_ :Optional[Any] = """model.%s.weight""" % nlayer
snake_case_ :Any = vnp.copy() # same in embedded
snake_case_ :List[Any] = torch.tensor(_lowercase )
if key_name.startswith("""model/wte""" ):
snake_case_ :Tuple = """lm_head.weight"""
snake_case_ :List[str] = vnp.copy() # same in embedded
snake_case_ :List[Any] = torch.tensor(_lowercase )
elif key_name.startswith("""model/wob""" ):
snake_case_ :str = """final_logits_bias"""
snake_case_ :Any = vnp.copy() # same in embedded
snake_case_ :List[Any] = state.reshape((1, -1) )
snake_case_ :Union[str, Any] = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
snake_case_ :str = """model.last_project.weight"""
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-TensorFlow stores the kernel transposed relative to PyTorch
snake_case_ :int = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
snake_case_ :Optional[int] = """model.last_project.bias"""
snake_case_ :Tuple = vnp.copy() # same because it is one dimensional
snake_case_ :Any = torch.tensor(_lowercase )
torch.save(_lowercase, args.output )
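# Illustrative aside (not part of the original converter): the repeated
# `vnp.transpose([1, 0]).copy()` calls above exist because TF/Mesh-TensorFlow stores
# dense kernels as (in_features, out_features) while torch.nn.Linear expects
# (out_features, in_features). A minimal sketch with hypothetical sizes:
import numpy as np
import torch
_demo_tf_kernel = np.ones((3, 5), dtype=np.float32)                       # (in, out) in TF
_demo_pt_weight = torch.tensor(_demo_tf_kernel.transpose([1, 0]).copy())  # (out, in) in PyTorch
assert _demo_pt_weight.shape == (5, 3)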
if __name__ == "__main__":
__a = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
__a = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 66 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class lowerCamelCase__( _lowerCAmelCase):
UpperCAmelCase__ : Dict = """data2vec-text"""
def __init__( self: int , UpperCamelCase_: Tuple=3_05_22 , UpperCamelCase_: Dict=7_68 , UpperCamelCase_: Any=12 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: List[str]=30_72 , UpperCamelCase_: Dict="gelu" , UpperCamelCase_: str=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Optional[Any]=5_12 , UpperCamelCase_: Any=2 , UpperCamelCase_: Tuple=0.02 , UpperCamelCase_: str=1E-12 , UpperCamelCase_: Any=1 , UpperCamelCase_: str=0 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: List[str]=True , UpperCamelCase_: Any=None , **UpperCamelCase_: Union[str, Any] , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
class lowerCamelCase__( _lowerCAmelCase):
@property
def lowerCAmelCase__ ( self: Tuple ):
if self.task == "multiple-choice":
__lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
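# Illustrative usage (an aside, not part of the original module; it uses the upstream
# transformers name because the two obfuscated classes above share one identifier):
from transformers import Data2VecTextConfig as _DemoConfig
_demo_cfg = _DemoConfig(hidden_size=768, num_hidden_layers=12)
assert _demo_cfg.model_type == "data2vec-text"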
| 12 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__a = pd.read_csv("sample_data.csv", header=None)
__a = df.shape[:1][0]
# If you're using some other dataset, set the target column here
__a = df.iloc[:, 1:2]
__a = actual_data.values.reshape(len_data, 1)
__a = MinMaxScaler().fit_transform(actual_data)
__a = 10
__a = 5
__a = 20
__a = len_data - periods * look_back
__a = actual_data[:division]
__a = actual_data[division - look_back :]
__a , __a = [], []
__a , __a = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__a = np.array(train_x)
__a = np.array(test_x)
__a = np.array([list(i.ravel()) for i in train_y])
__a = np.array([list(i.ravel()) for i in test_y])
__a = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
__a = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
__a = model.predict(x_test)
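# Illustrative aside (not part of the original script): since the series was scaled
# with MinMaxScaler above, predictions come back in [0, 1] and are typically mapped
# back to the original units with inverse_transform. A self-contained sketch:
import numpy as np
from sklearn.preprocessing import MinMaxScaler
_demo_series = np.array([[10.0], [20.0], [30.0]])
_demo_scaler = MinMaxScaler().fit(_demo_series)
_demo_scaled = _demo_scaler.transform(_demo_series)            # values in [0, 1]
_demo_restored = _demo_scaler.inverse_transform(_demo_scaled)  # back to original units
assert np.allclose(_demo_restored, _demo_series)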
| 66 | 0 |
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(_lowercase ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
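# Illustrative aside (not part of the original snippet): the comprehension above
# implements character n-grams; a self-contained restatement with explicit names:
def _demo_char_ngrams(sentence: str, ngram_size: int) -> list:
    # slide a window of length ngram_size across the string
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
assert _demo_char_ngrams("abcd", 2) == ["ab", "bc", "cd"]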
| 313 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 0 |
from __future__ import annotations
from typing import Any
def _a ( lowerCamelCase ):
create_state_space_tree(_lowercase, [], 0 )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
if index == len(_lowercase ):
print(_lowercase )
return
create_state_space_tree(_lowercase, _lowercase, index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(_lowercase, _lowercase, index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
_lowerCamelCase =[3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
| 287 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :int = XCLIPTextConfig()
# derive patch size from model name
snake_case_ :Union[str, Any] = model_name.find("""patch""" )
snake_case_ :List[str] = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
snake_case_ :Any = XCLIPVisionConfig(patch_size=_lowercase, num_frames=_lowercase )
if "large" in model_name:
snake_case_ :Optional[Any] = 768
snake_case_ :Union[str, Any] = 3072
snake_case_ :Any = 12
snake_case_ :Any = 1024
snake_case_ :str = 4096
snake_case_ :Union[str, Any] = 16
snake_case_ :Union[str, Any] = 24
snake_case_ :Tuple = 768
snake_case_ :Any = 3072
if model_name == "xclip-large-patch14-16-frames":
snake_case_ :Any = 336
snake_case_ :Any = XCLIPConfig.from_text_vision_configs(_lowercase, _lowercase )
if "large" in model_name:
snake_case_ :List[Any] = 768
return config
def A_ ( _lowercase ):
'''simple docstring'''
if name == "token_embedding.weight":
snake_case_ :Optional[Any] = name.replace("""token_embedding.weight""", """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
snake_case_ :Tuple = name.replace("""positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
snake_case_ :Dict = name.replace("""ln_1""", """layer_norm1""" )
if "ln_2" in name:
snake_case_ :str = name.replace("""ln_2""", """layer_norm2""" )
if "c_fc" in name:
snake_case_ :str = name.replace("""c_fc""", """fc1""" )
if "c_proj" in name:
snake_case_ :int = name.replace("""c_proj""", """fc2""" )
if name.startswith("""transformer.resblocks""" ):
snake_case_ :Union[str, Any] = name.replace("""transformer.resblocks""", """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
snake_case_ :Union[str, Any] = name.replace("""attn.out_proj""", """self_attn.out_proj""" )
if "ln_final" in name:
snake_case_ :Union[str, Any] = name.replace("""ln_final""", """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
snake_case_ :Any = name.replace("""visual.class_embedding""", """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
snake_case_ :Optional[int] = name.replace("""visual.positional_embedding""", """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
snake_case_ :Union[str, Any] = name.replace("""visual.transformer.resblocks""", """vision_model.encoder.layers""" )
if "visual.conv1" in name:
snake_case_ :int = name.replace("""visual.conv1""", """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
snake_case_ :Any = name.replace("""visual.ln_pre""", """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
snake_case_ :str = name.replace("""visual.ln_post""", """vision_model.post_layernorm""" )
if "visual.proj" in name:
snake_case_ :Union[str, Any] = name.replace("""visual.proj""", """visual_projection.weight""" )
if "text_projection" in name:
snake_case_ :Dict = name.replace("""text_projection""", """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
snake_case_ :List[str] = name.replace("""prompts_visual_proj""", """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
snake_case_ :Dict = name.replace("""prompts_visual_ln""", """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
snake_case_ :str = name.replace("""positional""", """position""" )
if name.startswith("""mit.resblocks""" ):
snake_case_ :Dict = name.replace("""mit.resblocks""", """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
snake_case_ :Union[str, Any] = name.replace("""prompts_generator.norm""", """prompts_generator.layernorm""" )
return name
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case_ :Dict = orig_state_dict.pop(_lowercase )
if "attn.in_proj" in key:
snake_case_ :Optional[Any] = key.split(""".""" )
if key.startswith("""visual""" ):
snake_case_ :Any = key_split[3]
snake_case_ :Optional[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case_ :str = val[
:dim, :
]
snake_case_ :Optional[int] = val[
dim : dim * 2, :
]
snake_case_ :Union[str, Any] = val[
-dim:, :
]
else:
snake_case_ :Dict = val[
:dim
]
snake_case_ :Optional[int] = val[
dim : dim * 2
]
snake_case_ :Optional[int] = val[
-dim:
]
else:
if "weight" in key:
snake_case_ :Optional[Any] = val[
:dim, :
]
snake_case_ :List[str] = val[
dim : dim * 2, :
]
snake_case_ :Dict = val[
-dim:, :
]
else:
snake_case_ :Union[str, Any] = val[:dim]
snake_case_ :Union[str, Any] = val[
dim : dim * 2
]
snake_case_ :Union[str, Any] = val[-dim:]
elif key.startswith("""mit""" ):
snake_case_ :Tuple = key_split[2]
snake_case_ :Union[str, Any] = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case_ :Optional[int] = val[:dim, :]
snake_case_ :Optional[int] = val[dim : dim * 2, :]
snake_case_ :str = val[-dim:, :]
else:
snake_case_ :str = val[:dim]
snake_case_ :Any = val[dim : dim * 2]
snake_case_ :int = val[-dim:]
else:
snake_case_ :Tuple = key_split[2]
snake_case_ :Any = config.text_config.hidden_size
if "weight" in key:
snake_case_ :Dict = val[:dim, :]
snake_case_ :Dict = val[
dim : dim * 2, :
]
snake_case_ :List[str] = val[-dim:, :]
else:
snake_case_ :Any = val[:dim]
snake_case_ :Tuple = val[
dim : dim * 2
]
snake_case_ :List[str] = val[-dim:]
else:
snake_case_ :Optional[int] = rename_key(_lowercase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
snake_case_ :Optional[Any] = val.T
snake_case_ :Tuple = val
return orig_state_dict
def A_ ( _lowercase ):
'''simple docstring'''
if num_frames == 8:
snake_case_ :str = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
snake_case_ :int = """eating_spaghetti.npy"""
elif num_frames == 32:
snake_case_ :List[str] = """eating_spaghetti_32_frames.npy"""
snake_case_ :int = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""", filename=_lowercase, repo_type="""dataset""", )
snake_case_ :Union[str, Any] = np.load(_lowercase )
return list(_lowercase )
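# Illustrative aside (not part of the original script; the frame count and spatial
# sizes below are hypothetical): np.load on the clip yields an array shaped
# (num_frames, height, width, channels), and list() turns it into per-frame arrays:
import numpy as np
_demo_clip = np.zeros((8, 224, 224, 3), dtype=np.uint8)
_demo_frames = list(_demo_clip)
assert len(_demo_frames) == 8 and _demo_frames[0].shape == (224, 224, 3)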
def A_ ( _lowercase, _lowercase=None, _lowercase=False ):
'''simple docstring'''
snake_case_ :List[Any] = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
snake_case_ :Optional[int] = model_to_url[model_name]
snake_case_ :int = 8
if "16-frames" in model_name:
snake_case_ :List[Any] = 16
elif "shot" in model_name:
snake_case_ :Dict = 32
snake_case_ :Optional[int] = get_xclip_config(_lowercase, _lowercase )
snake_case_ :Optional[Any] = XCLIPModel(_lowercase )
model.eval()
if "drive" in checkpoint_url:
snake_case_ :List[str] = """pytorch_model.bin"""
gdown.cached_download(_lowercase, _lowercase, quiet=_lowercase )
snake_case_ :List[Any] = torch.load(_lowercase, map_location="""cpu""" )["""model"""]
else:
snake_case_ :Tuple = torch.hub.load_state_dict_from_url(_lowercase )["""model"""]
snake_case_ :Union[str, Any] = convert_state_dict(_lowercase, _lowercase )
snake_case_ :str = XCLIPModel(_lowercase )
snake_case_, snake_case_ :Optional[int] = model.load_state_dict(_lowercase, strict=_lowercase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
snake_case_ :List[str] = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
snake_case_ :List[Any] = VideoMAEImageProcessor(size=_lowercase )
snake_case_ :Any = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
snake_case_ :str = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
snake_case_ :Optional[Any] = XCLIPProcessor(image_processor=_lowercase, tokenizer=_lowercase )
snake_case_ :Optional[int] = prepare_video(_lowercase )
snake_case_ :Optional[Any] = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""], videos=_lowercase, return_tensors="""pt""", padding=_lowercase )
print("""Shape of pixel values:""", inputs.pixel_values.shape )
with torch.no_grad():
snake_case_ :List[Any] = model(**_lowercase )
# Verify outputs
snake_case_ :List[Any] = outputs.logits_per_video
snake_case_ :Any = logits_per_video.softmax(dim=1 )
print("""Probs:""", _lowercase )
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case_ :Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case_ :str = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] )
elif model_name == "xclip-base-patch16":
snake_case_ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case_ :Any = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] )
elif model_name == "xclip-large-patch14":
snake_case_ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case_ :Tuple = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case_ :Union[str, Any] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case_ :Dict = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case_ :Union[str, Any] = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case_ :str = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case_ :str = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case_ :int = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case_ :Optional[int] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case_ :Any = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case_ :Tuple = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case_ :Union[str, Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(_lowercase, _lowercase, atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(_lowercase, organization="""nielsr""" )
processor.push_to_hub(_lowercase, organization="""nielsr""" )
slow_tokenizer.push_to_hub(_lowercase, organization="""nielsr""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__a = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 66 | 0 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
lowerCamelCase_ = logging.getLogger(__name__)
lowerCamelCase_ = tf.data.AUTOTUNE
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=_lowercase , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=_lowercase , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=_lowercase , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=_lowercase , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=_lowercase , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=_lowercase , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=_lowercase , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=_lowercase , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=_lowercase , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=_lowercase , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=_lowercase , default=1E-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=_lowercase , default=1E-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=_lowercase , default=512 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=_lowercase , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=_lowercase , required=_lowercase , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=_lowercase , help="""Model ID to upload to on the Hugging Face Hub.""" )
UpperCamelCase__ = parser.parse_args()
return args
def __magic_name__ ( __a : int ):
'''simple docstring'''
try:
if args.tpu_name:
UpperCamelCase__ = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
UpperCamelCase__ = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(_lowercase )
tf.tpu.experimental.initialize_tpu_system(_lowercase )
return tpu
def __magic_name__ ( __a : int ):
'''simple docstring'''
UpperCamelCase__ = 0
for file in file_list:
UpperCamelCase__ = file.split("""/""" )[-1]
UpperCamelCase__ = re.search(R"""-\d+-(\d+)\.tfrecord""" , _lowercase ).group(1 )
UpperCamelCase__ = int(_lowercase )
num_samples += sample_count
return num_samples
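# Illustrative aside (not part of the original script; the file name is hypothetical):
# the regex above expects shard names ending in "-<shard>-<num_samples>.tfrecord".
import re
_demo_match = re.search(r"-\d+-(\d+)\.tfrecord", "wiki-00003-4096.tfrecord")
assert _demo_match is not None and int(_demo_match.group(1)) == 4096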
def __magic_name__ ( __a : List[Any] , __a : Optional[Any] , __a : Any , __a : List[str] , __a : Any , __a : int=None ):
'''simple docstring'''
UpperCamelCase__ = count_samples(_lowercase )
UpperCamelCase__ = tf.data.Dataset.from_tensor_slices(_lowercase )
if shuffle:
UpperCamelCase__ = dataset.shuffle(len(_lowercase ) )
UpperCamelCase__ = tf.data.TFRecordDataset(_lowercase , num_parallel_reads=_lowercase )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
UpperCamelCase__ = dataset.apply(tf.data.experimental.assert_cardinality(_lowercase ) )
UpperCamelCase__ = dataset.map(_lowercase , num_parallel_calls=_lowercase )
if shuffle:
assert shuffle_buffer_size is not None
UpperCamelCase__ = dataset.shuffle(args.shuffle_buffer_size )
UpperCamelCase__ = dataset.batch(_lowercase , drop_remainder=_lowercase )
UpperCamelCase__ = dataset.map(_lowercase , num_parallel_calls=_lowercase )
UpperCamelCase__ = dataset.prefetch(_lowercase )
return dataset
def __magic_name__ ( __a : List[str] ):
'''simple docstring'''
if not args.no_tpu:
UpperCamelCase__ = initialize_tpu(_lowercase )
UpperCamelCase__ = tf.distribute.TPUStrategy(_lowercase )
else:
UpperCamelCase__ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
UpperCamelCase__ = AutoTokenizer.from_pretrained(args.tokenizer )
UpperCamelCase__ = AutoConfig.from_pretrained(args.pretrained_model_config )
UpperCamelCase__ = tokenizer.vocab_size
UpperCamelCase__ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
UpperCamelCase__ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
UpperCamelCase__ = count_samples(_lowercase )
UpperCamelCase__ = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
UpperCamelCase__ = steps_per_epoch * args.num_epochs
with strategy.scope():
UpperCamelCase__ = TFAutoModelForMaskedLM.from_config(_lowercase )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
UpperCamelCase__ = create_optimizer(
num_train_steps=_lowercase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=_lowercase , metrics=["""accuracy"""] )
def decode_fn(__a : Dict ):
UpperCamelCase__ = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(_lowercase , _lowercase )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
UpperCamelCase__ = DataCollatorForLanguageModeling(
tokenizer=_lowercase , mlm_probability=args.mlm_probability , mlm=_lowercase , return_tensors="""tf""" )
def mask_with_collator(__a : List[Any] ):
# TF really needs an isin() function
UpperCamelCase__ = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
UpperCamelCase__ = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(_lowercase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_lowercase , )
return batch
UpperCamelCase__ = args.per_replica_batch_size * strategy.num_replicas_in_sync
UpperCamelCase__ = prepare_dataset(
_lowercase , decode_fn=_lowercase , mask_fn=_lowercase , batch_size=_lowercase , shuffle=_lowercase , shuffle_buffer_size=args.shuffle_buffer_size , )
UpperCamelCase__ = prepare_dataset(
_lowercase , decode_fn=_lowercase , mask_fn=_lowercase , batch_size=_lowercase , shuffle=_lowercase , )
UpperCamelCase__ = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_lowercase ) )
model.fit(
_lowercase , validation_data=_lowercase , epochs=args.num_epochs , callbacks=_lowercase , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
lowerCamelCase_ = parse_args()
main(args)
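# Illustrative invocation (an aside; the script name and paths are hypothetical, the
# flags are the ones registered by parse_args above):
#
#   python train_mlm_tpu.py \
#       --pretrained_model_config roberta-base \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --output_dir ./mlm-checkpoints \
#       --no_tpu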
| 244 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self: List[Any] , snake_case: List[str] , snake_case: Optional[Any]=13 , snake_case: List[str]=7 , snake_case: Dict=True , snake_case: List[str]=True , snake_case: Optional[int]=True , snake_case: Any=True , snake_case: Optional[Any]=99 , snake_case: Tuple=32 , snake_case: Tuple=5 , snake_case: Dict=4 , snake_case: Optional[Any]=37 , snake_case: Union[str, Any]="gelu" , snake_case: Tuple=0.1 , snake_case: List[Any]=0.1 , snake_case: List[str]=512 , snake_case: Optional[int]=16 , snake_case: int=2 , snake_case: List[Any]=0.0_2 , snake_case: Union[str, Any]=4 , ) -> List[str]:
snake_case_ :Dict = parent
snake_case_ :Any = batch_size
snake_case_ :Any = seq_length
snake_case_ :List[str] = is_training
snake_case_ :Optional[Any] = use_attention_mask
snake_case_ :Dict = use_token_type_ids
snake_case_ :Union[str, Any] = use_labels
snake_case_ :str = vocab_size
snake_case_ :int = hidden_size
snake_case_ :List[str] = num_hidden_layers
snake_case_ :Dict = num_attention_heads
snake_case_ :Any = intermediate_size
snake_case_ :Tuple = hidden_act
snake_case_ :int = hidden_dropout_prob
snake_case_ :Optional[Any] = attention_probs_dropout_prob
snake_case_ :Any = max_position_embeddings
snake_case_ :Union[str, Any] = type_vocab_size
snake_case_ :Optional[int] = type_sequence_label_size
snake_case_ :Union[str, Any] = initializer_range
snake_case_ :Tuple = num_choices
def lowerCAmelCase_ ( self: Tuple ) -> str:
snake_case_ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ :Union[str, Any] = None
if self.use_attention_mask:
snake_case_ :str = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ :Any = None
if self.use_token_type_ids:
snake_case_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ :int = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase_ ( self: Optional[int] ) -> int:
snake_case_ :str = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_, snake_case_ :Optional[int] = config_and_inputs
snake_case_ :Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self: Optional[Any] ) -> Any:
snake_case_ :int = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_, snake_case_ :Dict = config_and_inputs
snake_case_ :Union[str, Any] = True
snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = True
_A : Dict = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase_ ( self: int ) -> List[str]:
snake_case_ :Any = FlaxBertModelTester(self )
@slow
def lowerCAmelCase_ ( self: List[str] ) -> Dict:
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
snake_case_ :Dict = FlaxBertModel.from_pretrained("""bert-base-cased""" )
snake_case_ :Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case )
| 66 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class __A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __A , __A=7 , __A=3 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=1 / 255 , __A=True , __A=[0.5, 0.5, 0.5] , __A=[0.5, 0.5, 0.5] , __A=True , ) -> Any:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a =size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
a =parent
a =batch_size
a =num_channels
a =min_resolution
a =max_resolution
a =do_resize
a =size
a =do_rescale
a =rescale_factor
a =do_normalize
a =image_mean
a =image_std
a =do_pad
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE ( self , __A , __A=False ) -> Union[str, Any]:
if not batched:
a =image_inputs[0]
if isinstance(__A , Image.Image ):
a , a =image.size
else:
a , a =image.shape[1], image.shape[2]
if w < h:
a =int(self.size['''shortest_edge'''] * h / w )
a =self.size["""shortest_edge"""]
elif w > h:
a =self.size["""shortest_edge"""]
a =int(self.size['''shortest_edge'''] * w / h )
else:
a =self.size["""shortest_edge"""]
a =self.size["""shortest_edge"""]
else:
a =[]
for image in image_inputs:
a =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a =max(__A , key=lambda __A : item[0] )[0]
a =max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
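# Illustrative aside (not part of the original test): the helper above mirrors the
# shortest-edge resizing rule; a self-contained restatement with explicit names:
def _demo_expected_hw(w: int, h: int, shortest_edge: int = 18) -> tuple:
    # returns (expected_height, expected_width), scaling so the shorter side
    # equals shortest_edge while preserving the aspect ratio
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge
assert _demo_expected_hw(18, 36) == (36, 18)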
@require_torch
@require_vision
class __A ( _lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = DetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =DetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , '''image_mean''' ) )
self.assertTrue(hasattr(__A , '''image_std''' ) )
self.assertTrue(hasattr(__A , '''do_normalize''' ) )
self.assertTrue(hasattr(__A , '''do_rescale''' ) )
self.assertTrue(hasattr(__A , '''rescale_factor''' ) )
self.assertTrue(hasattr(__A , '''do_resize''' ) )
self.assertTrue(hasattr(__A , '''size''' ) )
self.assertTrue(hasattr(__A , '''do_pad''' ) )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __A )
a =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __A )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
# Initialize image_processing
a =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
a =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a =self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a =self.image_processor_tester.get_expected_values(__A , batched=__A )
a =image_processing(__A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
a =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
a =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a =self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a =image_processing(__A , return_tensors='''pt''' ).pixel_values
a =self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Initialize image_processing
a =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
a =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a =self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a =image_processing(__A , return_tensors='''pt''' ).pixel_values
a =self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
# prepare image and target
a =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
a =json.loads(f.read() )
a ={"""image_id""": 3_9769, """annotations""": target}
# encode them
a =DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
a =image_processing(images=__A , annotations=__A , return_tensors='''pt''' )
# verify pixel values
a =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __A )
a =torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __A , atol=1E-4 ) )
# verify area
a =torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __A ) )
# verify boxes
a =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __A )
a =torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __A , atol=1E-3 ) )
# verify image_id
a =torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __A ) )
# verify is_crowd
a =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __A ) )
# verify class_labels
a =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __A ) )
# verify orig_size
a =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __A ) )
# verify size
a =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __A ) )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
# prepare image, target and masks_path
a =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
a =json.loads(f.read() )
a ={"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
a =pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
a =DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
a =image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors='''pt''' )
# verify pixel values
a =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __A )
a =torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __A , atol=1E-4 ) )
# verify area
a =torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __A ) )
# verify boxes
a =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __A )
a =torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __A , atol=1E-3 ) )
# verify image_id
a =torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __A ) )
# verify is_crowd
a =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __A ) )
# verify class_labels
a =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __A ) )
# verify masks
a =82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __A )
# verify orig_size
a =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __A ) )
# verify size
a =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __A ) ) | 81 |
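The boxes verified above are in DETR's normalized (center_x, center_y, width, height) format. A minimal sketch, not part of the test file, of mapping the first verified box back to absolute pixel corners given the fixture image's original 480 x 640 size (the helper name is ours):

def cxcywh_to_xyxy_pixels(box, height, width):
    # expand a normalized (cx, cy, w, h) box to absolute (x0, y0, x1, y1) corners
    cx, cy, w, h = box
    return (
        (cx - w / 2) * width,
        (cy - h / 2) * height,
        (cx + w / 2) * width,
        (cy + h / 2) * height,
    )

print(cxcywh_to_xyxy_pixels((0.5503, 0.2765, 0.0604, 0.2215), 480, 640))
# -> approximately (332.9, 79.6, 371.5, 185.9)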
"""simple docstring"""
import math
class SelfOrganizingMap:
    """A self-organizing map with two weight vectors (clusters)."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector closest to the sample."""
        da = 0.0
        db = 0.0
        for i in range(len(sample)):
            da += math.pow(sample[i] - weights[0][i], 2)
            db += math.pow(sample[i] - weights[1][i], 2)
        # the smaller squared Euclidean distance wins
        return 0 if da < db else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move the winning vector j towards the sample by learning rate alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    """Train the SOM on four binary samples, then classify a test sample."""
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"""Cluster that the test sample belongs to: {winner}""")
    print(f"""Weights that have been trained: {weights}""")
# running the main() function
if __name__ == "__main__":
main()
| 66 | 0 |
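A hand-checked walk through one training step of the map above, assuming the same initial weights as main() (names here are illustrative):

import math

weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
sample = [1, 1, 0, 0]
# squared Euclidean distances to each weight vector
da = sum(math.pow(sample[i] - weights[0][i], 2) for i in range(4))  # 1.86
db = sum(math.pow(sample[i] - weights[1][i], 2) for i in range(4))  # 0.98
winner = 0 if da < db else 1  # cluster 1 is closer
# move the winning vector halfway towards the sample (alpha = 0.5)
updated = [w + 0.5 * (s - w) for w, s in zip(weights[winner], sample)]
print(winner, updated)  # 1 [0.9, 0.7, 0.35, 0.15] (up to float rounding)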
'''simple docstring'''
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node's prefix and a word.

        Returns (matching_string, remaining_prefix, remaining_word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words: list[str]) -> None:
        """Insert many words into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree."""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        """Return True if the word is in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        """Delete a word from the tree; return True if it was removed."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0) -> None:
        """Print the tree, one node per line, indented by depth."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """Print an example radix tree."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 276 |
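A small illustration, separate from the tests above, of how match() drives insertion: descend when the whole prefix matches, split the node when it only partially matches:

node = RadixNode(prefix="banana", is_leaf=True)
print(node.match("bananas"))  # ('banana', '', 's')  -> descend with remaining 's'
print(node.match("band"))     # ('ban', 'ana', 'd')  -> split the node at 'ban'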
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Optional[int] , snake_case: Any , snake_case: Optional[Any]=13 , snake_case: Tuple=32 , snake_case: Optional[int]=2 , snake_case: Tuple=3 , snake_case: Tuple=16 , snake_case: Optional[Any]=[1, 2, 1] , snake_case: Optional[int]=[2, 2, 4] , snake_case: Optional[int]=2 , snake_case: int=2.0 , snake_case: Union[str, Any]=True , snake_case: List[str]=0.0 , snake_case: List[Any]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[Any]="gelu" , snake_case: Optional[int]=False , snake_case: Union[str, Any]=True , snake_case: Union[str, Any]=0.0_2 , snake_case: Optional[int]=1E-5 , snake_case: Optional[Any]=True , snake_case: List[Any]=None , snake_case: List[Any]=True , snake_case: Optional[Any]=10 , snake_case: str=8 , ) -> Tuple:
snake_case_ :Dict = parent
snake_case_ :Any = batch_size
snake_case_ :List[Any] = image_size
snake_case_ :List[Any] = patch_size
snake_case_ :int = num_channels
snake_case_ :Tuple = embed_dim
snake_case_ :str = depths
snake_case_ :str = num_heads
snake_case_ :Optional[int] = window_size
snake_case_ :Tuple = mlp_ratio
snake_case_ :Any = qkv_bias
snake_case_ :List[Any] = hidden_dropout_prob
snake_case_ :Optional[Any] = attention_probs_dropout_prob
snake_case_ :Union[str, Any] = drop_path_rate
snake_case_ :Any = hidden_act
snake_case_ :Optional[Any] = use_absolute_embeddings
snake_case_ :Union[str, Any] = patch_norm
snake_case_ :Dict = layer_norm_eps
snake_case_ :str = initializer_range
snake_case_ :Tuple = is_training
snake_case_ :Tuple = scope
snake_case_ :Union[str, Any] = use_labels
snake_case_ :Optional[Any] = type_sequence_label_size
snake_case_ :Dict = encoder_stride
def lowerCAmelCase_ ( self: int ) -> int:
snake_case_ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ :Any = None
if self.use_labels:
snake_case_ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ :int = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self: str ) -> Union[str, Any]:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCAmelCase_ ( self: str , snake_case: Optional[int] , snake_case: Dict , snake_case: str ) -> List[Any]:
snake_case_ :Union[str, Any] = SwinvaModel(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Optional[int] = model(snake_case )
snake_case_ :Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case_ :int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: Tuple , snake_case: int ) -> Any:
snake_case_ :Dict = SwinvaForMaskedImageModeling(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Tuple = model(snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ :List[Any] = 1
snake_case_ :int = SwinvaForMaskedImageModeling(snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ :int = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase_ ( self: List[Any] , snake_case: Any , snake_case: List[str] , snake_case: Union[str, Any] ) -> Tuple:
snake_case_ :int = self.type_sequence_label_size
snake_case_ :List[Any] = SwinvaForImageClassification(snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Dict = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self: int ) -> str:
snake_case_ :Any = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_ :List[str] = config_and_inputs
snake_case_ :List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_A : Any = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
_A : List[Any] = False
_A : List[str] = False
_A : Tuple = False
_A : List[str] = False
def lowerCAmelCase_ ( self: Dict ) -> List[Any]:
snake_case_ :Optional[int] = SwinvaModelTester(self )
snake_case_ :List[str] = ConfigTester(self , config_class=snake_case , embed_dim=37 )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple:
snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> str:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def lowerCAmelCase_ ( self: int ) -> Dict:
pass
def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]:
snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :Optional[int] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ :List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def lowerCAmelCase_ ( self: Dict ) -> Optional[int]:
snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :Optional[int] = model_class(snake_case )
snake_case_ :List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ :int = [*signature.parameters.keys()]
snake_case_ :List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]:
snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :List[str] = True
for model_class in self.all_model_classes:
snake_case_ :List[Any] = True
snake_case_ :Any = False
snake_case_ :Optional[int] = True
snake_case_ :Tuple = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Any = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :str = outputs.attentions
snake_case_ :Dict = len(self.model_tester.depths )
self.assertEqual(len(snake_case ) , snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ :Union[str, Any] = True
snake_case_ :Tuple = config.window_size**2
snake_case_ :Any = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :int = outputs.attentions
self.assertEqual(len(snake_case ) , snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
snake_case_ :Any = len(snake_case )
# Check attention is always last and order is fine
snake_case_ :int = True
snake_case_ :Dict = True
snake_case_ :Optional[int] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Dict = model(**self._prepare_for_class(snake_case , snake_case ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
snake_case_ :Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
snake_case_ :int = 2
self.assertEqual(out_len + added_hidden_states , len(snake_case ) )
snake_case_ :str = outputs.attentions
self.assertEqual(len(snake_case ) , snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def lowerCAmelCase_ ( self: int , snake_case: Dict , snake_case: Dict , snake_case: Optional[Any] , snake_case: Dict ) -> List[str]:
snake_case_ :Dict = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Optional[int] = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :str = outputs.hidden_states
snake_case_ :List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case ) , snake_case )
# Swinv2 has a different seq_length
snake_case_ :List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ :Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
snake_case_ :str = outputs.reshaped_hidden_states
self.assertEqual(len(snake_case ) , snake_case )
snake_case_, snake_case_, snake_case_, snake_case_ :Any = reshaped_hidden_states[0].shape
snake_case_ :int = (
reshaped_hidden_states[0].view(snake_case , snake_case , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase_ ( self: Any ) -> Any:
snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case_ :Union[str, Any] = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :List[str] = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
def lowerCAmelCase_ ( self: Tuple ) -> Any:
snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Optional[int] = 3
snake_case_ :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case_ :str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ :Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case_ :int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case_ :str = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :Tuple = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
def lowerCAmelCase_ ( self: Any ) -> Tuple:
snake_case_ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case )
def lowerCAmelCase_ ( self: Optional[int] ) -> Dict:
snake_case_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@slow
def lowerCAmelCase_ ( self: List[Any] ) -> Dict:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ :List[str] = SwinvaModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]:
snake_case_, snake_case_ :str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Optional[int] = _config_zero_init(snake_case )
for model_class in self.all_model_classes:
snake_case_ :Tuple = model_class(config=snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self: List[str] ) -> List[str]:
snake_case_ :Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
snake_case )
snake_case_ :str = self.default_image_processor
snake_case_ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case_ :str = image_processor(images=snake_case , return_tensors="""pt""" ).to(snake_case )
# forward pass
with torch.no_grad():
snake_case_ :Tuple = model(**snake_case )
# verify the logits
snake_case_ :Dict = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case )
snake_case_ :int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
| 66 | 0 |
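A quick sanity check, ours, of the sequence-length arithmetic used in create_and_check_model above: each downsampling stage after patch embedding shrinks the token count by a factor of 4 while doubling the channel dimension:

image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))
print(expected_seq_len, expected_dim)  # 16 64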
'''simple docstring'''
def perfect(number: int) -> bool:
    """Return True if ``number`` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
number = int(input('Enter number: ').strip())
print(f'{number} is {"" if perfect(number) else "not "}a Perfect Number.') | 145 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if ``phone`` is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
| 66 | 0 |
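A few examples, ours, of what the pattern accepts and rejects (prefix 0/94/+94/0094, an operator digit excluding 3 and 9, an optional separator, then seven digits):

assert is_sri_lankan_phone_number("+94773283048")
assert is_sri_lankan_phone_number("071-8382399")
assert not is_sri_lankan_phone_number("0737283048")  # 3 is not a valid operator digit
assert not is_sri_lankan_phone_number("071838239")   # one digit short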
"""simple docstring"""
def counting_sort(collection: list) -> list:
    """Sort a collection of integers in ascending order (stable)."""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string: str) -> str:
    """Sort the characters of a string via counting sort."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
user_input = input("Enter numbers separated by a comma:\n").strip()
unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 260 |
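Example runs of the two functions above:

print(counting_sort([0, 5, 3, 2, 2]))    # [0, 2, 2, 3, 5]
print(counting_sort([-2, -5, -45]))      # [-45, -5, -2]
print(counting_sort_string("counting"))  # cginnotu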
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__a = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A_ ( _lowercase ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
if args.student_type == "roberta":
snake_case_ :Tuple = False
elif args.student_type == "gpt2":
snake_case_ :Union[str, Any] = False
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
if args.student_type == "roberta":
snake_case_ :List[str] = False
def A_ ( ):
'''simple docstring'''
snake_case_ :Union[str, Any] = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""", action="""store_true""", help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""", type=_lowercase, required=_lowercase, help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""", type=_lowercase, required=_lowercase, help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""", )
parser.add_argument(
"""--student_type""", type=_lowercase, choices=["""distilbert""", """roberta""", """gpt2"""], required=_lowercase, help="""The student type (DistilBERT, RoBERTa).""", )
parser.add_argument("""--student_config""", type=_lowercase, required=_lowercase, help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""", default=_lowercase, type=_lowercase, help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""", choices=["""bert""", """roberta""", """gpt2"""], required=_lowercase, help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""", type=_lowercase, required=_lowercase, help="""The teacher model.""" )
parser.add_argument("""--temperature""", default=2.0, type=_lowercase, help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""", default=0.5, type=_lowercase, help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""", default=0.0, type=_lowercase, help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""", )
parser.add_argument("""--alpha_clm""", default=0.5, type=_lowercase, help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""", default=0.0, type=_lowercase, help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""", default=0.0, type=_lowercase, help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""", action="""store_true""", help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""", default=0.15, type=_lowercase, help="""Proportion of tokens for which we need to make a prediction.""", )
parser.add_argument("""--word_mask""", default=0.8, type=_lowercase, help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""", default=0.1, type=_lowercase, help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""", default=0.1, type=_lowercase, help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""", default=0.7, type=_lowercase, help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""", )
parser.add_argument("""--token_counts""", type=_lowercase, help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""", action="""store_true""", help="""If true, compute the distillation loss only the [MLM] prediction distribution.""", )
parser.add_argument(
"""--freeze_pos_embs""", action="""store_true""", help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""", )
parser.add_argument(
"""--freeze_token_type_embds""", action="""store_true""", help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""", )
parser.add_argument("""--n_epoch""", type=_lowercase, default=3, help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""", type=_lowercase, default=5, help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""", action="""store_false""", help="""If true, group sequences that have similar length into the same batch. Default is true.""", )
parser.add_argument(
"""--gradient_accumulation_steps""", type=_lowercase, default=50, help="""Gradient accumulation for larger training batches.""", )
parser.add_argument("""--warmup_prop""", default=0.05, type=_lowercase, help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""", default=0.0, type=_lowercase, help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""", default=5e-4, type=_lowercase, help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""", default=1e-6, type=_lowercase, help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""", default=5.0, type=_lowercase, help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""", default=0.02, type=_lowercase, help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""", )
parser.add_argument(
"""--fp16_opt_level""", type=_lowercase, default="""O1""", help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
), )
parser.add_argument("""--n_gpu""", type=_lowercase, default=1, help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""", type=_lowercase, default=-1, help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""", type=_lowercase, default=56, help="""Random seed""" )
parser.add_argument("""--log_interval""", type=_lowercase, default=500, help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""", type=_lowercase, default=4000, help="""Checkpoint interval.""" )
snake_case_ :Tuple = parser.parse_args()
sanity_checks(_lowercase )
# ARGS #
init_gpu_params(_lowercase )
set_seed(_lowercase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path, """parameters.json""" ), """w""" ) as f:
json.dump(vars(_lowercase ), _lowercase, indent=4 )
git_log(args.dump_path )
snake_case_, snake_case_, snake_case_ :Any = MODEL_CLASSES[args.student_type]
snake_case_, snake_case_, snake_case_ :int = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
snake_case_ :Any = teacher_tokenizer_class.from_pretrained(args.teacher_name )
snake_case_ :Optional[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
snake_case_ :Union[str, Any] = tokenizer.all_special_tokens.index(_lowercase )
snake_case_ :Union[str, Any] = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
snake_case_ :str = special_tok_ids
snake_case_ :Any = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file, """rb""" ) as fp:
snake_case_ :str = pickle.load(_lowercase )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts, """rb""" ) as fp:
snake_case_ :Optional[Any] = pickle.load(_lowercase )
snake_case_ :Tuple = np.maximum(_lowercase, 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
snake_case_ :Optional[int] = 0.0 # do not predict special tokens
snake_case_ :int = torch.from_numpy(_lowercase )
else:
snake_case_ :List[str] = None
snake_case_ :Optional[int] = LmSeqsDataset(params=_lowercase, data=_lowercase )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
snake_case_ :List[Any] = student_config_class.from_pretrained(args.student_config )
snake_case_ :Union[str, Any] = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
snake_case_ :List[str] = student_model_class.from_pretrained(args.student_pretrained_weights, config=_lowercase )
else:
snake_case_ :Optional[int] = student_model_class(_lowercase )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info("""Student loaded.""" )
# TEACHER #
snake_case_ :Dict = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=_lowercase )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(_lowercase, _lowercase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(_lowercase, _lowercase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
snake_case_ :Optional[int] = Distiller(
params=_lowercase, dataset=_lowercase, token_probs=_lowercase, student=_lowercase, teacher=_lowercase )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
| 66 | 0 |
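A hypothetical invocation of the trainer above (the script file name and file paths are guesses; the flags are the ones the parser defines). MLM distillation requires --mlm together with a non-zero --alpha_mlm and --alpha_clm set to 0:

python train.py \
    --force \
    --dump_path serialization_dir/my_first_distillation \
    --data_file data/binarized_text.bert-base-uncased.pickle \
    --token_counts data/token_counts.bert-base-uncased.pickle \
    --student_type distilbert \
    --student_config training_configs/distilbert-base-uncased.json \
    --teacher_type bert \
    --teacher_name bert-base-uncased \
    --mlm --alpha_mlm 0.5 --alpha_ce 0.5 --alpha_clm 0.0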
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
lowerCAmelCase_ : Any = SamImageProcessor()
lowerCAmelCase_ : Tuple = SamProcessor(__lowercase )
processor.save_pretrained(self.tmpdirname )
def lowercase_ ( self , **__lowercase ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowercase ).image_processor
def lowercase_ ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Tuple = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCAmelCase_ : List[str] = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Optional[int] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ : Optional[int] = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 )
lowerCAmelCase_ : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__lowercase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Dict = self.get_image_processor()
lowerCAmelCase_ : List[Any] = SamProcessor(image_processor=__lowercase )
lowerCAmelCase_ : int = self.prepare_image_inputs()
lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''np''' )
lowerCAmelCase_ : Optional[Any] = processor(images=__lowercase , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : str = self.get_image_processor()
lowerCAmelCase_ : str = SamProcessor(image_processor=__lowercase )
lowerCAmelCase_ : Dict = [torch.ones((1, 3, 5, 5) )]
lowerCAmelCase_ : int = [[1_7_6_4, 2_6_4_6]]
lowerCAmelCase_ : Optional[Any] = [[6_8_3, 1_0_2_4]]
lowerCAmelCase_ : Any = processor.post_process_masks(__lowercase , __lowercase , __lowercase )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
lowerCAmelCase_ : Dict = processor.post_process_masks(
__lowercase , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
lowerCAmelCase_ : str = [np.ones((1, 3, 5, 5) )]
lowerCAmelCase_ : Union[str, Any] = processor.post_process_masks(__lowercase , np.array(__lowercase ) , np.array(__lowercase ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
lowerCAmelCase_ : List[str] = [[1, 0], [0, 1]]
with self.assertRaises(__lowercase ):
lowerCAmelCase_ : int = processor.post_process_masks(__lowercase , np.array(__lowercase ) , np.array(__lowercase ) )
@require_vision
@require_tf
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> str:
lowerCAmelCase_ : Dict = tempfile.mkdtemp()
lowerCAmelCase_ : Dict = SamImageProcessor()
lowerCAmelCase_ : int = SamProcessor(__lowercase )
processor.save_pretrained(self.tmpdirname )
def lowercase_ ( self , **__lowercase ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowercase ).image_processor
def lowercase_ ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : List[str] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCAmelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : Optional[int] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ : Optional[int] = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 )
lowerCAmelCase_ : Optional[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__lowercase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Any = self.get_image_processor()
lowerCAmelCase_ : int = SamProcessor(image_processor=__lowercase )
lowerCAmelCase_ : List[Any] = self.prepare_image_inputs()
lowerCAmelCase_ : Optional[Any] = image_processor(__lowercase , return_tensors='''np''' )
lowerCAmelCase_ : List[str] = processor(images=__lowercase , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Dict = self.get_image_processor()
lowerCAmelCase_ : Any = SamProcessor(image_processor=__lowercase )
lowerCAmelCase_ : Optional[int] = [tf.ones((1, 3, 5, 5) )]
lowerCAmelCase_ : Dict = [[1_7_6_4, 2_6_4_6]]
lowerCAmelCase_ : Dict = [[6_8_3, 1_0_2_4]]
lowerCAmelCase_ : Union[str, Any] = processor.post_process_masks(__lowercase , __lowercase , __lowercase , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
lowerCAmelCase_ : List[str] = processor.post_process_masks(
__lowercase , tf.convert_to_tensor(__lowercase ) , tf.convert_to_tensor(__lowercase ) , return_tensors='''tf''' , )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
lowerCAmelCase_ : Dict = [np.ones((1, 3, 5, 5) )]
lowerCAmelCase_ : str = processor.post_process_masks(
__lowercase , np.array(__lowercase ) , np.array(__lowercase ) , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
lowerCAmelCase_ : Optional[Any] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
lowerCAmelCase_ : List[str] = processor.post_process_masks(
__lowercase , np.array(__lowercase ) , np.array(__lowercase ) , return_tensors='''tf''' )
@require_vision
@require_torchvision
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> str:
lowerCAmelCase_ : int = tempfile.mkdtemp()
lowerCAmelCase_ : str = SamImageProcessor()
lowerCAmelCase_ : Optional[int] = SamProcessor(__lowercase )
processor.save_pretrained(self.tmpdirname )
def lowercase_ ( self , **__lowercase ) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowercase ).image_processor
def lowercase_ ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self ) -> str:
lowerCAmelCase_ : Tuple = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCAmelCase_ : str = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : int = self.get_image_processor()
lowerCAmelCase_ : Optional[Any] = SamProcessor(image_processor=__lowercase )
lowerCAmelCase_ : Optional[int] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
lowerCAmelCase_ : Optional[int] = [tf.convert_to_tensor(__lowercase )]
lowerCAmelCase_ : Optional[int] = [torch.tensor(__lowercase )]
lowerCAmelCase_ : Dict = [[1_7_6_4, 2_6_4_6]]
lowerCAmelCase_ : Optional[Any] = [[6_8_3, 1_0_2_4]]
lowerCAmelCase_ : List[str] = processor.post_process_masks(
__lowercase , __lowercase , __lowercase , return_tensors='''tf''' )
lowerCAmelCase_ : Union[str, Any] = processor.post_process_masks(
__lowercase , __lowercase , __lowercase , return_tensors='''pt''' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : Optional[Any] = self.get_image_processor()
lowerCAmelCase_ : Any = SamProcessor(image_processor=__lowercase )
lowerCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
lowerCAmelCase_ : Optional[Any] = image_processor(__lowercase , return_tensors='''pt''' )["""pixel_values"""].numpy()
lowerCAmelCase_ : Dict = processor(images=__lowercase , return_tensors='''pt''' )["""pixel_values"""].numpy()
lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''tf''' )["""pixel_values"""].numpy()
lowerCAmelCase_ : Optional[Any] = processor(images=__lowercase , return_tensors='''tf''' )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(__lowercase , __lowercase ) )
self.assertTrue(np.allclose(__lowercase , __lowercase ) )
self.assertTrue(np.allclose(__lowercase , __lowercase ) ) | 262 |
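A minimal sketch of the post-processing contract exercised above: low-resolution mask logits plus each image's original and reshaped sizes yield full-resolution masks (the hub checkpoint id is an assumption):

import numpy as np
from transformers import SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")  # assumed checkpoint
masks = processor.post_process_masks(
    [np.ones((1, 3, 5, 5))],           # per-image low-res mask logits
    original_sizes=[[1764, 2646]],
    reshaped_input_sizes=[[683, 1024]],
)
print(masks[0].shape)  # torch.Size([1, 3, 1764, 2646])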
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: Any ) -> str:
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case , )
assert hasattr(self , """env""" )
def lowerCAmelCase_ ( self: int , snake_case: Dict ) -> List[Any]:
# configuration for running training on smdistributed Model Parallel
snake_case_ :Tuple = {
"""enabled""": True,
"""processes_per_host""": 8,
}
snake_case_ :List[Any] = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
snake_case_ :Tuple = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
snake_case_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case , py_version="""py36""" , )
def lowerCAmelCase_ ( self: Any , snake_case: Tuple ) -> List[str]:
TrainingJobAnalytics(snake_case ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowerCAmelCase_ ( self: Dict , snake_case: Dict ) -> List[Any]:
# create estimator
snake_case_ :List[Any] = self.create_estimator(snake_case )
# run training
estimator.fit()
# result dataframe
snake_case_ :Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
snake_case_ :Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
snake_case_ :Dict = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
snake_case_ :int = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case )
| 66 | 0 |
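A small sketch, ours, of how the smdistributed settings above split the 8 processes per p3dn.24xlarge host: with 4 model-parallel partitions and ddp enabled, the remaining factor is the per-host data-parallel degree (this is our reading of the SageMaker model-parallel options, not something asserted by the test):

processes_per_host, partitions = 8, 4
data_parallel_degree = processes_per_host // partitions
print(data_parallel_degree)  # 2 data-parallel replicas per host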
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Optional[int] = logging.get_logger(__name__)
__a :List[Any] = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class _a ( _lowerCAmelCase ):
"""simple docstring"""
_lowerCamelCase : Dict = """altclip_text_model"""
def __init__( self : Tuple , UpperCAmelCase : List[Any]=250002 , UpperCAmelCase : Dict=1024 , UpperCAmelCase : Tuple=24 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=4096 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : List[str]=514 , UpperCAmelCase : List[Any]=1 , UpperCAmelCase : Any=0.02 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : Union[str, Any]=1E-05 , UpperCAmelCase : List[Any]=1 , UpperCAmelCase : Tuple=0 , UpperCAmelCase : int=2 , UpperCAmelCase : Dict="absolute" , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Tuple=768 , **UpperCAmelCase : Union[str, Any] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = hidden_act
A_ = intermediate_size
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = initializer_range
A_ = initializer_factor
A_ = layer_norm_eps
A_ = position_embedding_type
A_ = use_cache
A_ = project_dim
class _a ( _lowerCAmelCase ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = """altclip_vision_model"""
def __init__( self : Dict , UpperCAmelCase : Dict=768 , UpperCAmelCase : List[Any]=3072 , UpperCAmelCase : int=512 , UpperCAmelCase : List[Any]=12 , UpperCAmelCase : List[str]=12 , UpperCAmelCase : Optional[int]=3 , UpperCAmelCase : int=224 , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : Optional[int]="quick_gelu" , UpperCAmelCase : Tuple=1E-5 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : List[str]=0.02 , UpperCAmelCase : Union[str, Any]=1.0 , **UpperCAmelCase : str , ):
super().__init__(**UpperCAmelCase )
A_ = hidden_size
A_ = intermediate_size
A_ = projection_dim
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_channels
A_ = patch_size
A_ = image_size
A_ = initializer_range
A_ = initializer_factor
A_ = attention_dropout
A_ = layer_norm_eps
A_ = hidden_act
@classmethod
def __A ( cls : List[str] , UpperCAmelCase : Union[str, os.PathLike] , **UpperCAmelCase : List[Any] ):
cls._set_token_in_kwargs(UpperCAmelCase )
A_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type" ) == "altclip":
A_ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class _a ( _lowerCAmelCase ):
"""simple docstring"""
_lowerCamelCase : str = """altclip"""
_lowerCamelCase : Any = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=768 , logit_scale_init_value=2.6592 , **kwargs ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict" , None )
        vision_config_dict = kwargs.pop("vision_config_dict" , None )
        super().__init__(**kwargs )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key ): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
        self.text_config = AltCLIPTextConfig(**text_config )
        self.vision_config = AltCLIPVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
@classmethod
    def from_text_vision_configs(cls , text_config: AltCLIPTextConfig , vision_config: AltCLIPVisionConfig , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
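        # Swin merges patches between stages: each of the len(depths) - 1 merges quarters the
        # token count and doubles the channel dim, which yields the expected shapes below.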
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]:
pass
    def test_config( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip("""Swin does not use inputs_embeds""" )
def lowerCAmelCase_ ( self: str ) -> List[str]:
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]:
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def lowerCAmelCase_ ( self: Dict ) -> List[Any]:
pass
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
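        # pad each spatial dim up to the next multiple of the patch size (note this adds a
        # full extra patch even when the size is already divisible, mirroring the model)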
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowerCAmelCase_ ( self: List[str] ) -> str:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowerCAmelCase_ ( self: str ) -> List[Any]:
pass
    def test_model_outputs_equivalence( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
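        # NaNs never compare equal, even to themselves, so zero them out first; otherwise the
        # torch.allclose check below would report tuple/dict outputs as different wherever both are NaN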
        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif isinstance(tuple_object , Dict ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() , dict_object.values() ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1e-5 ) , msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object ).any()}. Dict has"
                            f" `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object ).any()}."
                        ) , )
            recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
@require_torch
class MaskFormerSwinBackboneTest( unittest.TestCase , BackboneTesterMixin ):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp( self ):
        self.model_tester = MaskFormerSwinModelTester(self )
    def test_backbone_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
| 66 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class DistilBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
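        # keep the serialized normalizer (from tokenizer.json) in sync with the Python-side
        # options: if any of the three flags below differ, rebuild the normalizer with the new values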
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
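        # sequence-pair format: `[CLS] A [SEP]` is token type 0 and `B [SEP]` is token type 1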
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files ) | 212 |
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__a = logging.get_logger(__name__)
enable_full_determinism()
class UNetaDModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = UNetaDModel
    main_input_name = "sample"
@property
    def dummy_input( self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor([10] ).to(torch_device )
        return {"sample": noise, "timestep": time_step}
@property
    def input_shape( self ):
        return (3, 32, 32)
    @property
    def output_shape( self ):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = UNetaDModel
    main_input_name = "sample"
@property
    def dummy_input( self ):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor([10] ).to(torch_device )
        return {"sample": noise, "timestep": time_step}
@property
    def input_shape( self ):
        return (4, 32, 32)
    @property
    def output_shape( self ):
        return (4, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]:
        model, loading_info = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self: Tuple ) -> Dict:
        model, loading_info = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=True )
        model.to(torch_device )
        image = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self: str ) -> Any:
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=True )
        model_accelerate.to(torch_device )
        model_accelerate.eval()
        noise = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        noise = noise.to(torch_device )
        time_step = torch.tensor([10] * noise.shape[0] ).to(torch_device )
        arr_accelerate = model_accelerate(noise , time_step )["sample"]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load, _ = UNetaDModel.from_pretrained(
            "fusing/unet-ldm-dummy-update" , output_loading_info=True , low_cpu_mem_usage=False )
        model_normal_load.to(torch_device )
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise , time_step )["sample"]
        assert torch_all_close(arr_accelerate , arr_normal_load , rtol=1e-3 )
def lowerCAmelCase_ ( self: Tuple ) -> Any:
        model = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
        model.eval()
        model.to(torch_device )
        noise = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        noise = noise.to(torch_device )
        time_step = torch.tensor([10] * noise.shape[0] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-3 ) )
class NCSNppModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = UNetaDModel
    main_input_name = "sample"
@property
    def dummy_input( self , sizes=(32, 32) ):
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor(batch_size * [10] ).to(dtype=torch.int32 , device=torch_device )
        return {"sample": noise, "timestep": time_step}
@property
    def input_shape( self ):
        return (3, 32, 32)
    @property
    def output_shape( self ):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ):
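        # score-based (VE) flavor of the UNet: Fourier time embeddings plus skip blocks, as in NCSN++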
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0 ),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
@slow
def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]:
        model, loading_info = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(torch_device )
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256) ).to(torch_device )
        inputs["sample"] = noise
        image = model(**inputs )
        assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase_ ( self: str ) -> Dict:
        model = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
        model.to(torch_device )
        batch_size = 4
        num_channels = 3
        sizes = (256, 256)
        noise = torch.ones((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor(batch_size * [1e-4] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
        model = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
        model.to(torch_device )
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor(batch_size * [1e-4] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]:
# not required for this model
pass
| 66 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args ):
'''simple docstring'''
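    # exactly one language-modeling objective may be active: MLM (alpha_mlm > 0) or CLM (alpha_clm > 0)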
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student , args ):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student , args ):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training" )
    parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
    parser.add_argument(
        "--dump_path" , type=str , required=True , help="The output directory (log, checkpoints, parameters, etc.)" )
    parser.add_argument(
        "--data_file" , type=str , required=True , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
    parser.add_argument(
        "--student_type" , type=str , choices=["distilbert", "roberta", "gpt2"] , required=True , help="The student type (DistilBERT, RoBERTa)." , )
    parser.add_argument("--student_config" , type=str , required=True , help="Path to the student configuration." )
    parser.add_argument(
        "--student_pretrained_weights" , default=None , type=str , help="Load student initialization checkpoint." )
    parser.add_argument(
        "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=True , help="Teacher type (BERT, RoBERTa)." )
    parser.add_argument("--teacher_name" , type=str , required=True , help="The teacher model." )
    parser.add_argument("--temperature" , default=2.0 , type=float , help="Temperature for the softmax temperature." )
    parser.add_argument(
        "--alpha_ce" , default=0.5 , type=float , help="Linear weight for the distillation loss. Must be >=0." )
    parser.add_argument(
        "--alpha_mlm" , default=0.0 , type=float , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
    parser.add_argument("--alpha_clm" , default=0.5 , type=float , help="Linear weight for the CLM loss. Must be >=0." )
    parser.add_argument("--alpha_mse" , default=0.0 , type=float , help="Linear weight of the MSE loss. Must be >=0." )
    parser.add_argument(
        "--alpha_cos" , default=0.0 , type=float , help="Linear weight of the cosine embedding loss. Must be >=0." )
    parser.add_argument(
        "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
    parser.add_argument(
        "--mlm_mask_prop" , default=0.15 , type=float , help="Proportion of tokens for which we need to make a prediction." , )
    parser.add_argument("--word_mask" , default=0.8 , type=float , help="Proportion of tokens to mask out." )
    parser.add_argument("--word_keep" , default=0.1 , type=float , help="Proportion of tokens to keep." )
    parser.add_argument("--word_rand" , default=0.1 , type=float , help="Proportion of tokens to randomly replace." )
    parser.add_argument(
        "--mlm_smoothing" , default=0.7 , type=float , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
    parser.add_argument("--token_counts" , type=str , help="The token counts in the data_file for MLM." )
    parser.add_argument(
        "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
    parser.add_argument(
        "--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
    parser.add_argument(
        "--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
    parser.add_argument("--n_epoch" , type=int , default=3 , help="Number of pass on the whole dataset." )
    parser.add_argument("--batch_size" , type=int , default=5 , help="Batch size (for each process)." )
    parser.add_argument(
        "--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
    parser.add_argument(
        "--gradient_accumulation_steps" , type=int , default=50 , help="Gradient accumulation for larger training batches." , )
    parser.add_argument("--warmup_prop" , default=0.05 , type=float , help="Linear warmup proportion." )
    parser.add_argument("--weight_decay" , default=0.0 , type=float , help="Weight decay if we apply some." )
    parser.add_argument("--learning_rate" , default=5e-4 , type=float , help="The initial learning rate for Adam." )
    parser.add_argument("--adam_epsilon" , default=1e-6 , type=float , help="Epsilon for Adam optimizer." )
    parser.add_argument("--max_grad_norm" , default=5.0 , type=float , help="Max gradient norm." )
    parser.add_argument("--initializer_range" , default=0.02 , type=float , help="Random initialization range." )
    parser.add_argument(
        "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
    parser.add_argument(
        "--fp16_opt_level" , type=str , default="O1" , help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ) , )
    parser.add_argument("--n_gpu" , type=int , default=1 , help="Number of GPUs in the node." )
    parser.add_argument("--local_rank" , type=int , default=-1 , help="Distributed training - Local rank" )
    parser.add_argument("--seed" , type=int , default=56 , help="Random seed" )
    parser.add_argument("--log_interval" , type=int , default=500 , help="Tensorboard logging interval." )
    parser.add_argument("--checkpoint_interval" , type=int , default=4000 , help="Checkpoint interval." )
    args = parser.parse_args()
    sanity_checks(args )
# ARGS #
    init_gpu_params(args )
    set_seed(args )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'
                    " it. Use `--force` if you want to overwrite it." )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(f'Param: {args}' )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
            json.dump(vars(args ) , f , indent=4 )
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f'Special tokens {special_tok_ids}' )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'Loading data from {args.data_file}' )
    with open(args.data_file , "rb" ) as fp:
        data = pickle.load(fp )
    if args.mlm:
        logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)' )
        with open(args.token_counts , "rb" ) as fp:
            counts = pickle.load(fp )
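        # word2vec-style smoothing: masking probability ~ count**(-mlm_smoothing), so rare
        # tokens are up-weighted relative to frequent ones when sampling MLM targets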
        token_probs = np.maximum(counts , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs )
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args , data=data )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f'Loading student config from {args.student_config}' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}' )
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
    else:
        student = student_model_class(stu_architecture_config )
if args.n_gpu > 0:
student.to(f'cuda:{args.local_rank}' )
logger.info("""Student loaded.""" )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
if args.n_gpu > 0:
teacher.to(f'cuda:{args.local_rank}' )
logger.info(f'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
| 12 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
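# `_import_structure` only names the heavy torch/vision submodules; `_LazyModule` at the
# bottom of this file resolves them lazily on first attribute access, keeping base imports cheap.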
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 | 0 |
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 313 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
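        # cross_attention_dim above must match the hidden_size of the tiny CLIP text encoder below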
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = "."
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def test_stable_diffusion_2( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = "."
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def test_stable_diffusion_2_non_square( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = "."
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
| 66 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_lowerCamelCase ="""\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"""
_lowerCamelCase ="""\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"""
_lowerCamelCase ="""\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"""
_lowerCamelCase ="""\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"""
_lowerCamelCase ="""The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=[1, 1_0, 1_0_0] , __magic_name__=4 , __magic_name__=3.0 ):
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=__magic_name__ ) as executor:
lowerCamelCase : Union[str, Any] = []
lowerCamelCase : Optional[Any] = Counter()
lowerCamelCase : List[Any] = 0
lowerCamelCase : Optional[Any] = defaultdict(__magic_name__ )
for task_id, (candidates, test_case) in enumerate(zip(__magic_name__ , __magic_name__ ) ):
for candidate in candidates:
lowerCamelCase : Dict = candidate + """\n""" + test_case
lowerCamelCase : List[Any] = (test_program, timeout, task_id, completion_id[task_id])
lowerCamelCase : List[Any] = executor.submit(__magic_name__ , *__magic_name__ )
futures.append(__magic_name__ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__magic_name__ ):
lowerCamelCase : Any = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
            lowerCamelCase , lowerCamelCase : Union[str, Any] = [], []
for result in results.values():
result.sort()
lowerCamelCase : Dict = [r[1]["""passed"""] for r in result]
total.append(len(__magic_name__ ) )
correct.append(sum(__magic_name__ ) )
lowerCamelCase : Union[str, Any] = np.array(__magic_name__ )
lowerCamelCase : Any = np.array(__magic_name__ )
lowerCamelCase : int = k
lowerCamelCase : str = {F'''pass@{k}''': estimate_pass_at_k(__magic_name__ , __magic_name__ , __magic_name__ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
def estimator(lowerCamelCase, lowerCamelCase, lowerCamelCase ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1 ) )
if isinstance(_lowercase, _lowercase ):
lowerCamelCase : List[Any] = itertools.repeat(_lowercase, len(_lowercase ) )
else:
assert len(_lowercase ) == len(_lowercase )
lowerCamelCase : Optional[int] = iter(_lowercase )
return np.array([estimator(int(_lowercase ), int(_lowercase ), _lowercase ) for n, c in zip(_lowercase, _lowercase )] )
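# A worked check of the unbiased pass@k estimator above: with n generated
# samples of which c pass, pass@k = 1 - C(n-c, k) / C(n, k), computed stably
# as 1 - prod_{i=n-c+1..n}(1 - k/i). This standalone sketch (the helper name
# is illustrative) reproduces the docstring example, where one of two
# candidates passes.
import numpy as np

def _pass_at_k_example(n, c, k):
    # if fewer than k samples fail, any draw of k samples contains a pass
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

assert _pass_at_k_example(n=2, c=1, k=1) == 0.5  # matches {'pass@1': 0.5}
assert _pass_at_k_example(n=2, c=1, k=2) == 1.0  # matches {'pass@2': 1.0}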
| 287 |
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Tuple ) -> Optional[Any]:
snake_case_ :Optional[int] = {}
def lowerCAmelCase_ ( self: Dict , snake_case: str ) -> None:
snake_case_ :str = {}
def lowerCAmelCase_ ( self: Optional[int] , snake_case: str , snake_case: str , snake_case: float ) -> None:
if nodea not in self.connections:
self.add_node(snake_case )
if nodea not in self.connections:
self.add_node(snake_case )
snake_case_ :Dict = probability
def lowerCAmelCase_ ( self: List[Any] ) -> list[str]:
return list(self.connections )
def lowerCAmelCase_ ( self: Any , snake_case: str ) -> str:
snake_case_ :Optional[Any] = 0
snake_case_ :List[str] = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def A_ ( _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[str] = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(_lowercase, _lowercase, _lowercase )
snake_case_ :int = Counter(graph.get_nodes() )
snake_case_ :Optional[Any] = start
for _ in range(_lowercase ):
snake_case_ :Tuple = graph.transition(_lowercase )
visited[node] += 1
return visited
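# A minimal, self-contained sketch of the random walk performed above
# (transition probabilities and the step count are illustrative): sample the
# next node from the current node's outgoing distribution via a cumulative
# threshold, and tally visits.
from collections import Counter
from random import random

def _walk_sketch(start, transitions, steps):
    probs = {}
    for src, dst, p in transitions:
        probs.setdefault(src, {})[dst] = p
    visited = Counter()
    node = start
    for _ in range(steps):
        cumulative, r = 0.0, random()
        for dst, p in probs[node].items():
            cumulative += p
            if cumulative > r:
                node = dst
                break
        visited[node] += 1
    return visited

print(_walk_sketch("a", [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)], 1_000))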
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def __magic_name__ ( __a : int , __a : List[str] ):
'''simple docstring'''
UpperCamelCase__ = tmp_path_factory.mktemp("""dset_infos_dir""" )
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
UpperCamelCase__ = DatasetInfosDict.from_directory(_lowercase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
] , )
def __magic_name__ ( __a : List[Any] , __a : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ = str(_lowercase )
dataset_info.write_to_directory(_lowercase )
UpperCamelCase__ = DatasetInfo.from_directory(_lowercase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(_lowercase , """dataset_info.json""" ) )
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = DatasetInfo(
description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1_337 , post_processing_size=442 , dataset_size=1_234 , size_in_bytes=1_337 + 442 + 1_234 , )
UpperCamelCase__ = dataset_info._to_yaml_dict()
assert sorted(_lowercase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
UpperCamelCase__ = yaml.safe_dump(_lowercase )
UpperCamelCase__ = yaml.safe_load(_lowercase )
assert dataset_info_yaml_dict == reloaded
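# The round-trip above leans on yaml.safe_dump/yaml.safe_load being inverses
# for plain scalars, lists and dicts; a minimal standalone check with
# illustrative values:
import yaml

_info = {"dataset_size": 42, "splits": [{"name": "train"}]}
assert yaml.safe_load(yaml.safe_dump(_info)) == _info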
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = DatasetInfo()
UpperCamelCase__ = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42 ),
"""v2""": DatasetInfo(dataset_size=1_337 ),
} ),
] , )
def __magic_name__ ( __a : Union[str, Any] , __a : str ):
'''simple docstring'''
UpperCamelCase__ = str(_lowercase )
dataset_infos_dict.write_to_directory(_lowercase )
UpperCamelCase__ = DatasetInfosDict.from_directory(_lowercase )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
UpperCamelCase__ = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
UpperCamelCase__ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(_lowercase , """README.md""" ) )
| 244 |
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__a = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__a = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__a = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__a = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__a = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__a = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Any = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", _lowercase )
return [m.group(0 ) for m in matches]
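# Worked examples for the CamelCase splitter above, as a self-contained
# helper for reference: the regex breaks on lower->upper boundaries and
# before the final capital of an acronym run, so framework prefixes like
# "TF" and "Flax" come out as their own token.
import re

_camel_re = re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")

def _camel_case_split_example(name):
    return [m.group(0) for m in _camel_re.finditer(name)]

assert _camel_case_split_example("TFBertModel") == ["TF", "Bert", "Model"]
assert _camel_case_split_example("FlaxRobertaForMaskedLM") == ["Flax", "Roberta", "For", "Masked", "LM"]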
def A_ ( ):
'''simple docstring'''
snake_case_ :int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
snake_case_ :Dict = {
config.replace("""Config""", """""" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
snake_case_ :Optional[Any] = collections.defaultdict(_lowercase )
snake_case_ :int = collections.defaultdict(_lowercase )
snake_case_ :List[str] = collections.defaultdict(_lowercase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(_lowercase ):
snake_case_ :int = None
if _re_tf_models.match(_lowercase ) is not None:
snake_case_ :int = tf_models
snake_case_ :List[str] = _re_tf_models.match(_lowercase ).groups()[0]
elif _re_flax_models.match(_lowercase ) is not None:
snake_case_ :List[Any] = flax_models
snake_case_ :Any = _re_flax_models.match(_lowercase ).groups()[0]
elif _re_pt_models.match(_lowercase ) is not None:
snake_case_ :Optional[Any] = pt_models
snake_case_ :int = _re_pt_models.match(_lowercase ).groups()[0]
if lookup_dict is not None:
while len(_lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
snake_case_ :Optional[int] = True
break
# Try again after removing the last word in the name
snake_case_ :Optional[Any] = """""".join(camel_case_split(_lowercase )[:-1] )
snake_case_ :Optional[int] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
snake_case_ :Optional[Any] = list(_lowercase )
all_models.sort()
snake_case_ :Optional[int] = {"""model_type""": all_models}
snake_case_ :Optional[int] = [pt_models[t] for t in all_models]
snake_case_ :Any = [tf_models[t] for t in all_models]
snake_case_ :Dict = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
snake_case_ :Dict = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
snake_case_ :Optional[Any] = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
snake_case_ :Tuple = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
snake_case_ :Tuple = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
snake_case_ :str = """AutoTokenizer"""
snake_case_ :int = [processors[t] for t in all_models]
return pd.DataFrame(_lowercase )
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :List[Any] = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
snake_case_ :Optional[int] = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
        snake_case_ :List[str] = [auto_class, f"""TF{auto_class}""", f"""Flax{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(_lowercase, _lowercase, _lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(_lowercase, _lowercase ):
continue
# First extract all model_names
snake_case_ :Tuple = []
for name in getattr(_lowercase, _lowercase ).values():
if isinstance(_lowercase, _lowercase ):
model_names.append(_lowercase )
else:
model_names.extend(list(_lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
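# Shape of the table the function above fills in, with illustrative entries:
# each concrete model class maps to its (pipeline_tag, auto_class) pair,
# mirroring what is read back from pipeline_tags.json below.
_example_table = {
    "BertForMaskedLM": ("fill-mask", "AutoModelForMaskedLM"),
    "TFBertForMaskedLM": ("fill-mask", "TFAutoModelForMaskedLM"),
}
assert _example_table["BertForMaskedLM"] == ("fill-mask", "AutoModelForMaskedLM")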
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[Any] = get_frameworks_table()
snake_case_ :str = Dataset.from_pandas(_lowercase )
snake_case_ :List[Any] = hf_hub_download(
"""huggingface/transformers-metadata""", """pipeline_tags.json""", repo_type="""dataset""", token=_lowercase )
snake_case_ :List[str] = Dataset.from_json(_lowercase )
snake_case_ :int = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(_lowercase ) )
}
snake_case_ :Optional[int] = update_pipeline_and_auto_class_table(_lowercase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
snake_case_ :Tuple = sorted(table.keys() )
snake_case_ :Tuple = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
snake_case_ :Union[str, Any] = Dataset.from_pandas(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(_lowercase, """frameworks.json""" ) )
tags_dataset.to_json(os.path.join(_lowercase, """pipeline_tags.json""" ) )
if commit_sha is not None:
snake_case_ :Union[str, Any] = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
snake_case_ :List[Any] = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""", folder_path=_lowercase, repo_type="""dataset""", token=_lowercase, commit_message=_lowercase, )
def A_ ( ):
'''simple docstring'''
snake_case_ :List[Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
snake_case_ :Dict = transformers_module.pipelines.SUPPORTED_TASKS
snake_case_ :List[str] = []
for key in pipeline_tasks:
if key not in in_table:
snake_case_ :int = pipeline_tasks[key]["""pt"""]
if isinstance(_lowercase, (list, tuple) ):
snake_case_ :Any = model[0]
snake_case_ :str = model.__name__
if model not in in_table.values():
missing.append(_lowercase )
if len(_lowercase ) > 0:
snake_case_ :Optional[int] = """, """.join(_lowercase )
raise ValueError(
"""The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__a = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 66 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
lowerCamelCase_ : Optional[int] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def _A ( lowercase ):
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
        k =k.replace(_lowercase , _lowercase )
return k
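# Worked example of the cumulative renaming above: each (pegasus_name,
# hf_name) pair is applied in list order, so "/" becomes "." before the
# LayerNorm rules fire and the suffix rules then yield BART-style keys.
# A self-contained trace over the relevant subset of the table:
_example_patterns = [
    ["/", "."],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["decoder_layer_norm.", "decoder.layer_norm."],
]
_key = "model/decoder/LayerNorm/beta"
for _old, _new in _example_patterns:
    _key = _key.replace(_old, _new)
assert _key == "model.decoder.layer_norm.bias"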
def _A ( lowercase , lowercase ):
"""simple docstring"""
a =DEFAULTS.copy()
cfg_kwargs.update(_lowercase )
a =PegasusConfig(**_lowercase )
a =PegasusForConditionalGeneration(_lowercase )
a =torch_model.model.state_dict()
a ={}
for k, v in tf_weights.items():
a =rename_state_dict_key(_lowercase )
if new_k not in sd:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
a =v.T
a =torch.tensor(_lowercase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
a =torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
a =mapping["""shared.weight"""]
a =mapping["""shared.weight"""]
a ={k: torch.zeros_like(_lowercase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**_lowercase )
a =torch_model.model.load_state_dict(_lowercase , strict=_lowercase )
a =[
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def _A ( lowercase="./ckpt/aeslc/model.ckpt-32000" ):
"""simple docstring"""
a =tf.train.list_variables(_lowercase )
a ={}
a =["""Adafactor""", """global_step"""]
for name, shape in tqdm(_lowercase , desc='''converting tf checkpoint to dict''' ):
a =any(pat in name for pat in ignore_name )
if skip_key:
continue
a =tf.train.load_variable(_lowercase , _lowercase )
a =array
return tf_weights
def _A ( lowercase , lowercase ):
"""simple docstring"""
a =Path(_lowercase ).parent.name
a =task_specific_params[f'''summarization_{dataset}''']["""max_position_embeddings"""]
a =PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=_lowercase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(_lowercase )
# convert model
a =get_tf_weights_as_numpy(_lowercase )
a =task_specific_params[f'''summarization_{dataset}''']
if dataset == "large":
a =task_specific_params
a =convert_pegasus(_lowercase , _lowercase )
torch_model.save_pretrained(_lowercase )
a =torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(_lowercase , Path(_lowercase ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
lowerCamelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowerCamelCase_ : Any = parser.parse_args()
if args.save_dir is None:
lowerCamelCase_ : Tuple = Path(args.tf_ckpt_path).parent.name
lowerCamelCase_ : Any = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir) | 81 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__a = logging.getLogger(__name__)
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : Union[str, Any] = """token-classification"""
def __init__( self: Any , snake_case: Tuple ) -> List[Any]:
if type(snake_case ) == dict:
snake_case_ :Optional[int] = Namespace(**snake_case )
snake_case_ :Optional[int] = import_module("""tasks""" )
try:
snake_case_ :Any = getattr(snake_case , hparams.task_type )
snake_case_ :TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
snake_case_ :Any = self.token_classification_task.get_labels(hparams.labels )
snake_case_ :str = CrossEntropyLoss().ignore_index
super().__init__(snake_case , len(self.labels ) , self.mode )
def lowerCAmelCase_ ( self: Dict , **snake_case: List[Any] ) -> Any:
return self.model(**snake_case )
def lowerCAmelCase_ ( self: str , snake_case: Tuple , snake_case: List[Any] ) -> Optional[int]:
snake_case_ :List[str] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
snake_case_ :List[str] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
        ) # XLM and RoBERTa don't use token_type_ids
snake_case_ :Optional[Any] = self(**snake_case )
snake_case_ :List[str] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def lowerCAmelCase_ ( self: int ) -> Dict:
snake_case_ :List[Any] = self.hparams
for mode in ["train", "dev", "test"]:
snake_case_ :Optional[int] = self._feature_file(snake_case )
if os.path.exists(snake_case ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , snake_case )
snake_case_ :Optional[int] = torch.load(snake_case )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
snake_case_ :Optional[int] = self.token_classification_task.read_examples_from_file(args.data_dir , snake_case )
snake_case_ :Any = self.token_classification_task.convert_examples_to_features(
snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=snake_case , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , snake_case )
torch.save(snake_case , snake_case )
def lowerCAmelCase_ ( self: Optional[int] , snake_case: int , snake_case: int , snake_case: bool = False ) -> DataLoader:
snake_case_ :int = self._feature_file(snake_case )
logger.info("""Loading features from cached file %s""" , snake_case )
snake_case_ :str = torch.load(snake_case )
snake_case_ :Dict = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
snake_case_ :str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
snake_case_ :List[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
snake_case_ :List[str] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
snake_case_ :Any = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(snake_case , snake_case , snake_case , snake_case ) , batch_size=snake_case )
def lowerCAmelCase_ ( self: List[str] , snake_case: Dict , snake_case: Union[str, Any] ) -> List[str]:
"""Compute validation""" ""
snake_case_ :List[str] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
snake_case_ :Dict = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
        ) # XLM and RoBERTa don't use token_type_ids
snake_case_ :Dict = self(**snake_case )
snake_case_, snake_case_ :Dict = outputs[:2]
snake_case_ :Union[str, Any] = logits.detach().cpu().numpy()
snake_case_ :List[Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCAmelCase_ ( self: List[Any] , snake_case: int ) -> Tuple:
snake_case_ :Union[str, Any] = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
snake_case_ :Tuple = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
snake_case_ :Tuple = np.argmax(snake_case , axis=2 )
snake_case_ :List[str] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
snake_case_ :Optional[Any] = dict(enumerate(self.labels ) )
snake_case_ :Dict = [[] for _ in range(out_label_ids.shape[0] )]
snake_case_ :Dict = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
snake_case_ :str = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(snake_case , snake_case ),
"""precision""": precision_score(snake_case , snake_case ),
"""recall""": recall_score(snake_case , snake_case ),
"""f1""": fa_score(snake_case , snake_case ),
}
snake_case_ :List[Any] = dict(results.items() )
snake_case_ :Union[str, Any] = results
return ret, preds_list, out_label_list
def lowerCAmelCase_ ( self: Optional[Any] , snake_case: Dict ) -> Optional[Any]:
# when stable
snake_case_, snake_case_, snake_case_ :Tuple = self._eval_end(snake_case )
snake_case_ :str = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCAmelCase_ ( self: Tuple , snake_case: Optional[int] ) -> Any:
# updating to test_epoch_end instead of deprecated test_end
snake_case_, snake_case_, snake_case_ :Any = self._eval_end(snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
snake_case_ :Optional[int] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCAmelCase_ ( snake_case: Any , snake_case: int ) -> Dict:
# Add NER specific options
BaseTransformer.add_model_specific_args(snake_case , snake_case )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=snake_case , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=snake_case , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=snake_case , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=snake_case , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
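# Standalone sketch of the label-alignment step inside `_eval_end` above:
# positions whose gold id equals the padding label id (CrossEntropyLoss's
# ignore_index, -100 by default) are dropped before the seqeval metrics are
# computed. Labels and shapes here are illustrative.
import numpy as np

_pad_id = -100
_label_map = {0: "O", 1: "B-PER"}
_gold = np.array([[0, 1, _pad_id]])
_pred = np.array([[0, 1, 1]])
_gold_list = [[] for _ in range(_gold.shape[0])]
_pred_list = [[] for _ in range(_gold.shape[0])]
for _i in range(_gold.shape[0]):
    for _j in range(_gold.shape[1]):
        if _gold[_i, _j] != _pad_id:
            _gold_list[_i].append(_label_map[_gold[_i][_j]])
            _pred_list[_i].append(_label_map[_pred[_i][_j]])
assert _gold_list == [["O", "B-PER"]] and _pred_list == [["O", "B-PER"]]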
if __name__ == "__main__":
__a = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__a = NERTransformer.add_model_specific_args(parser, os.getcwd())
__a = parser.parse_args()
__a = NERTransformer(args)
__a = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__a = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
__a = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 66 | 0 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
A__: int = object()
# For specifying empty leaf dict `{}`
A__: str = object()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ) -> Any:
_a : List[str] =tuple((re.compile(x + """$""" ) for x in qs) )
for i in range(len(_lowercase ) - len(_lowercase ) + 1 ):
        _a : Union[str, Any] =[x.match(y) for x, y in zip(_lowercase ,ks[i:] )]
if matches and all(_lowercase ):
return True
return False
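# The window matcher above compiles each rule segment (anchored with "$") and
# slides it along the flattened parameter-key tuple; a rule applies when it
# matches a consecutive run of segments. A self-contained sketch of that idea
# with an illustrative key:
import re

def _match_example(qs, ks):
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        window = [x.match(y) for x, y in zip(qts, ks[i:])]
        if window and all(window):
            return True
    return False

assert _match_example(
    ("attention", "(q_proj|k_proj|v_proj)", "kernel"),
    ("transformer", "h", "0", "attention", "q_proj", "kernel"),
)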
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any] ) -> List[Any]:
def replace(_UpperCAmelCase : Tuple ,_UpperCAmelCase : Any ):
for rule, replacement in rules:
if _match(_lowercase ,_lowercase ):
return replacement
return val
return replace
def SCREAMING_SNAKE_CASE_ ( ) -> Dict:
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""" ,_lowercase )),
(("transformer", "wte", "embedding"), P("""mp""" ,_lowercase )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(_lowercase ,"""mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""" ,_lowercase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(_lowercase ,"""mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""" ,_lowercase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[str] ) -> List[Any]:
_a : Optional[Any] =_get_partition_rules()
_a : Dict =_replacement_rules(_lowercase )
_a : Tuple ={k: _unmatched for k in flatten_dict(_lowercase )}
_a : List[str] ={k: replace(_lowercase ,_lowercase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(_lowercase ) )
| 276 |
"""simple docstring"""
from math import factorial
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Optional[int] , snake_case: Dict , snake_case: int ) -> Tuple:
snake_case_ :List[Any] = real
if isinstance(snake_case , snake_case ):
snake_case_ :Tuple = [1] * rank
else:
snake_case_ :Optional[Any] = rank
def __repr__( self: List[str] ) -> Tuple:
return (
f"""{self.real}+"""
f"""{'+'.join(str(snake_case )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
snake_case_ :Any = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , snake_case )
def __add__( self: Optional[int] , snake_case: Dict ) -> List[str]:
if not isinstance(snake_case , snake_case ):
return Dual(self.real + other , self.duals )
snake_case_ :List[Any] = self.duals.copy()
snake_case_ :Tuple = other.duals.copy()
if len(snake_case ) > len(snake_case ):
o_dual.extend([1] * (len(snake_case ) - len(snake_case )) )
elif len(snake_case ) < len(snake_case ):
s_dual.extend([1] * (len(snake_case ) - len(snake_case )) )
snake_case_ :Dict = []
for i in range(len(snake_case ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , snake_case )
_A : str = __add__
def __sub__( self: Tuple , snake_case: Union[str, Any] ) -> Tuple:
return self + other * -1
def __mul__( self: str , snake_case: Tuple ) -> Optional[Any]:
if not isinstance(snake_case , snake_case ):
snake_case_ :Dict = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , snake_case )
snake_case_ :int = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , snake_case )
_A : int = __mul__
def __truediv__( self: List[str] , snake_case: List[str] ) -> List[str]:
if not isinstance(snake_case , snake_case ):
snake_case_ :Optional[Any] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , snake_case )
raise ValueError
def __floordiv__( self: int , snake_case: List[Any] ) -> Any:
if not isinstance(snake_case , snake_case ):
snake_case_ :Optional[int] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , snake_case )
raise ValueError
def __pow__( self: Optional[Any] , snake_case: Optional[int] ) -> List[Any]:
if n < 0 or isinstance(snake_case , snake_case ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
snake_case_ :str = self
for _ in range(n - 1 ):
x *= self
return x
def A_ ( _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
if not callable(_lowercase ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(_lowercase, (float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(_lowercase, _lowercase ):
raise ValueError("""differentiate() requires an int as input for order""" )
snake_case_ :Optional[Any] = Dual(_lowercase, 1 )
snake_case_ :List[Any] = func(_lowercase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def A_ ( _lowercase ):
'''simple docstring'''
return y**2 * y**4
print(differentiate(f, 9, 2))
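# Sanity arithmetic for the example above: f(y) = y**2 * y**4 = y**6, so
# f''(y) = 30 * y**4 and the printed value should be 30 * 9**4 = 196830
# (the order-2 dual coefficient times 2! recovers the second derivative).
assert 30 * 9**4 == 196_830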
| 66 | 0 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__a = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
__a = parser.parse_args()
__a = 'cpu'
__a = 'a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings'
__a = 'path-to-your-trained-model'
__a = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__a = pipe.to(device)
# to channels last
__a = pipe.unet.to(memory_format=torch.channels_last)
__a = pipe.vae.to(memory_format=torch.channels_last)
__a = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__a = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__a = torch.randn(2, 4, 64, 64)
__a = torch.rand(1) * 999
__a = torch.randn(2, 77, 768)
__a = (sample, timestep, encoder_hidden_status)
try:
__a = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__a = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__a = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__a = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__a = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__a = 666
__a = torch.Generator(device).manual_seed(seed)
__a = {'generator': generator}
if args.steps is not None:
__a = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__a = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png') | 145 |
"""simple docstring"""
from __future__ import annotations
__a = 10
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Union[str, Any] = 1
snake_case_ :List[str] = max(_lowercase )
while placement <= max_digit:
# declare and initialize empty buckets
snake_case_ :list[list] = [[] for _ in range(_lowercase )]
# split list_of_ints between the buckets
for i in list_of_ints:
snake_case_ :Any = int((i / placement) % RADIX )
buckets[tmp].append(_lowercase )
        # put each bucket's contents into list_of_ints
snake_case_ :Optional[Any] = 0
for b in range(_lowercase ):
for i in buckets[b]:
snake_case_ :Union[str, Any] = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
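# A self-contained sketch of the LSD radix sort above (helper name is
# illustrative): bucket by the current base-10 digit, rebuild the list, and
# move to the next digit until the largest value is exhausted.
def _radix_sort_sketch(nums):
    radix, placement = 10, 1
    while placement <= max(nums):
        buckets = [[] for _ in range(radix)]
        for n in nums:
            buckets[(n // placement) % radix].append(n)
        nums = [n for bucket in buckets for n in bucket]
        placement *= radix
    return nums

assert _radix_sort_sketch([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]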
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 | 0 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__A : int = Lock()
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_lowercase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_UpperCAmelCase = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_UpperCAmelCase = min(_lowercase , _lowercase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_lowercase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_UpperCAmelCase = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_UpperCAmelCase = max(_lowercase , _lowercase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_lowercase )
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_UpperCAmelCase = Pipe()
_UpperCAmelCase = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
_UpperCAmelCase = temp_rs
_UpperCAmelCase = temp_rr
for i in range(1 , len(_lowercase ) - 1 ):
_UpperCAmelCase = Pipe()
_UpperCAmelCase = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
_UpperCAmelCase = temp_rs
_UpperCAmelCase = temp_rr
process_array_.append(
Process(
target=_lowercase , args=(
len(_lowercase ) - 1,
arr[len(_lowercase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_lowercase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_lowercase ) ):
_UpperCAmelCase = result_pipe[p][0].recv()
process_array_[p].join()
return arr
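# The swap schedule used by the processes above is easier to see in a
# single-process sketch: on even phases, even-indexed pairs compare-and-swap;
# on odd phases, odd-indexed pairs do, and n phases suffice for n elements.
def _odd_even_transposition_sketch(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        start = 0 if phase % 2 == 0 else 1
        for i in range(start, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert _odd_even_transposition_sketch([3, 2, 1]) == [1, 2, 3]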
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*_lowercase )
_UpperCAmelCase = odd_even_transposition(_lowercase )
print('''Sorted List\n''' )
print(*_lowercase )
if __name__ == "__main__":
main()
| 260 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 0 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
_UpperCAmelCase : Optional[Any] ="""\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"""
_UpperCAmelCase : List[Any] ="""\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"""
_UpperCAmelCase : List[str] ="""\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"""
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
return float((preds == labels).mean() )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
lowerCAmelCase_ : Any = simple_accuracy(_lowercase , _lowercase )
lowerCAmelCase_ : List[str] = float(fa_score(y_true=_lowercase , y_pred=_lowercase ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> str:
lowerCAmelCase_ : Union[str, Any] = np.array(_lowercase )
lowerCAmelCase_ : Tuple = np.array(_lowercase )
lowerCAmelCase_ : Dict = en_sentvecs.shape[0]
# mean centering
lowerCAmelCase_ : int = en_sentvecs - np.mean(_lowercase , axis=0 )
lowerCAmelCase_ : Dict = in_sentvecs - np.mean(_lowercase , axis=0 )
lowerCAmelCase_ : int = cdist(_lowercase , _lowercase , '''cosine''' )
lowerCAmelCase_ : Tuple = np.array(range(_lowercase ) )
lowerCAmelCase_ : Optional[Any] = sim.argsort(axis=1 )[:, :10]
lowerCAmelCase_ : List[str] = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
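# A toy check of the retrieval metric above, generalized from the hard-coded
# top-10 to top-k so a three-sentence example is meaningful: sentence i's
# English vector should retrieve the Indic vector at the same index among its
# k nearest cosine neighbours.
import numpy as np
from scipy.spatial.distance import cdist

def _precision_at_k_example(en_sentvecs, in_sentvecs, k):
    en = en_sentvecs - np.mean(en_sentvecs, axis=0)
    ind = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en, ind, "cosine")
    preds = sim.argsort(axis=1)[:, :k]
    actual = np.arange(en.shape[0])
    return float(np.any(preds == actual[:, None], axis=1).mean())

_vecs = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
assert _precision_at_k_example(_vecs, _vecs, k=1) == 1.0  # identical sets retrieve themselves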
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class snake_case__( datasets.Metric ):
'''simple docstring'''
def lowercase_ ( self ) -> Union[str, Any]:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", '''
'''\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", '''
'''\"wiki-ner\"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def lowercase_ ( self , __lowercase , __lowercase ) -> Tuple:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(__lowercase , __lowercase )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(__lowercase , __lowercase )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__lowercase , __lowercase )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", '''
'''\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", '''
'''\"wiki-ner\"]''' ) | 262 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: List[Any] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase_ ( self: Tuple ) -> Any:
snake_case_, snake_case_ :List[str] = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-canny""" , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_, snake_case_ :Union[str, Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_ :Union[str, Any] = controlnet_params
snake_case_ :Union[str, Any] = """bird"""
snake_case_ :List[Any] = jax.device_count()
snake_case_ :List[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case_ :List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
snake_case_ :List[str] = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case_ :Any = jax.random.PRNGKey(0 )
snake_case_ :List[str] = jax.random.split(snake_case , jax.device_count() )
snake_case_ :List[Any] = replicate(snake_case )
snake_case_ :List[str] = shard(snake_case )
snake_case_ :str = shard(snake_case )
snake_case_ :Dict = pipe(
prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ :Union[str, Any] = images[0, 253:256, 253:256, -1]
snake_case_ :str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ :Dict = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self: int ) -> Dict:
snake_case_, snake_case_ :List[Any] = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-openpose""" , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_, snake_case_ :int = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_ :str = controlnet_params
snake_case_ :Optional[int] = """Chef in the kitchen"""
snake_case_ :Union[str, Any] = jax.device_count()
snake_case_ :Any = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case_ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
snake_case_ :Optional[Any] = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case_ :str = jax.random.PRNGKey(0 )
snake_case_ :str = jax.random.split(snake_case , jax.device_count() )
snake_case_ :Tuple = replicate(snake_case )
snake_case_ :str = shard(snake_case )
snake_case_ :int = shard(snake_case )
snake_case_ :List[str] = pipe(
prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ :int = images[0, 253:256, 253:256, -1]
snake_case_ :Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ :Optional[int] = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
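# For reference, the replicate/shard pair used in the tests above follows the
# standard pmap data layout: replicate() duplicates a pytree across local
# devices, and shard() reshapes the leading batch axis into
# (local_device_count, per_device_batch). A minimal sketch (shapes are
# illustrative; assumes a single-host setup):
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

_n_dev = jax.local_device_count()
_batch = jnp.zeros((_n_dev * 2, 77))           # global batch
assert shard(_batch).shape == (_n_dev, 2, 77)  # one leading slice per device
assert replicate({"w": jnp.ones(3)})["w"].shape == (_n_dev, 3)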
| 66 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _a ( _lowerCAmelCase ):
"""simple docstring"""
_lowerCamelCase : Tuple = (UniPCMultistepScheduler,)
_lowerCamelCase : Any = (("""num_inference_steps""", 2_5),)
def __A ( self : Any , **UpperCAmelCase : Optional[Any] ):
A_ = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""solver_type""": """bh2""",
}
config.update(**UpperCAmelCase )
return config
def __A ( self : List[Any] , UpperCAmelCase : Dict=0 , **UpperCAmelCase : List[str] ):
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("num_inference_steps" , UpperCAmelCase )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCAmelCase )
A_ = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
A_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
A_ = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
A_ = dummy_past_residuals[: new_scheduler.config.solver_order]
                A_ , A_ = sample, sample
for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
A_ = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , "set_timesteps" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_switch( self ):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2464 ) < 1E-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2464 ) < 1E-3
    def test_timesteps( self ):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , solver_order=order , solver_type=solver_type , )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_solver_order_and_type( self ):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    sample = self.full_loop(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def test_lower_order_final( self ):
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def test_inference_steps( self ):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2464 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type="v_prediction" )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1014 ) < 1E-3
    def test_fp16_support( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
    def test_unique_timesteps( self , **config ):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps | 312 |
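# A standalone sketch of the denoising loop that full_loop above exercises,
# assuming diffusers and torch are installed; the lambda below is a
# hypothetical stand-in for a trained UNet, so only the API flow is meaningful.
import torch
from diffusers import UniPCMultistepScheduler

demo_scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
demo_scheduler.set_timesteps(10)
demo_sample = torch.randn(1, 3, 8, 8)
demo_model = lambda x, t: torch.zeros_like(x)  # stand-in noise predictor

for t in demo_scheduler.timesteps:
    noise_pred = demo_model(demo_sample, t)
    demo_sample = demo_scheduler.step(noise_pred, t, demo_sample).prev_sample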
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
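# Consumer-side the laziness is invisible: attribute access on the package
# triggers the real import. A sketch, assuming torch is installed:
#   from transformers import MobileBertConfig, MobileBertModel
#   model = MobileBertModel(MobileBertConfig())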
| 66 | 0 |
import string
import numpy
def greatest_common_divisor( a: int , b: int ) -> int:
    return b if a == 0 else greatest_common_divisor(b % a , a )
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x : x % 36 )
    to_int = numpy.vectorize(round )
    def __init__( self , encrypt_key : numpy.ndarray ):
        '''simple docstring'''
        self.encrypt_key = self.modulus(encrypt_key )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters( self , letter : str ):
        '''simple docstring'''
        return self.key_string.index(letter )
    def replace_digits( self , num : int ):
        '''simple docstring'''
        return self.key_string[round(num )]
    def check_determinant( self ):
        '''simple docstring'''
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        req_l = len(self.key_string )
        if greatest_common_divisor(det , len(self.key_string ) ) != 1:
            msg = (
                f'''determinant modular {req_l} of encryption key({det}) '''
                f'''is not co prime w.r.t {req_l}.\nTry another key.'''
            )
            raise ValueError(msg )
    def process_text( self , text : str ):
        '''simple docstring'''
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars ) % self.break_key != 0:
            chars.append(last )
        return "".join(chars )
    def encrypt( self , text : str ):
        '''simple docstring'''
        text = self.process_text(text.upper() )
        encrypted = """"""
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec ) ).T.tolist()[
                0
            ]
            encrypted_batch = """""".join(
                self.replace_digits(num ) for num in batch_encrypted )
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key( self ):
        '''simple docstring'''
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        det_inv = None
        for i in range(len(self.key_string ) ):
            if (det * i) % len(self.key_string ) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key )
            * numpy.linalg.inv(self.encrypt_key )
        )
        return self.to_int(self.modulus(inv_key ) )
    def decrypt( self , text : str ):
        '''simple docstring'''
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper() )
        decrypted = """"""
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec ) ).T.tolist()[0]
            decrypted_batch = """""".join(
                self.replace_digits(num ) for num in batch_decrypted )
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    n = int(input('Enter the order of the encryption key: ' ) )
    hill_matrix = []
    print('Enter each row of the encryption key with space separated integers' )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )
    hc = HillCipher(numpy.array(hill_matrix ) )
    print('Would you like to encrypt or decrypt some text? (1 or 2)' )
    option = input('\n1. Encrypt\n2. Decrypt\n' )
    if option == "1":
        text_e = input('What text would you like to encrypt?: ' )
        print('Your encrypted text is:' )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input('What text would you like to decrypt?: ' )
        print('Your decrypted text is:' )
        print(hc.decrypt(text_d ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
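    # A quick non-interactive round trip (a sketch, not part of the original
    # CLI flow): the 2x2 key below has determinant 7, which is coprime with
    # 36, so check_determinant accepts it.
    demo = HillCipher(numpy.array([[2, 5], [1, 6]]))
    demo_ciphertext = demo.encrypt("testing hill cipher")
    print("demo round trip:", demo.decrypt(demo_ciphertext))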
main() | 212 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ):
    '''simple docstring'''
    parameter_file = os.path.join(args.tf_model_dir, """parameters.json""" )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
    if not args.output.endswith(""".pt""" ):
        args.output = args.output + """.pt"""
    new_state = OrderedDict()
with tf.device("""/CPU:0""" ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
snake_case_ :Any = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
snake_case_ :Optional[int] = 8
snake_case_ :List[str] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :List[str] = torch.tensor(_lowercase )
elif key_name.startswith("""model/moe""" ):
snake_case_ :Tuple = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/softmlp/kernel""" ):
snake_case_ :List[Any] = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
snake_case_ :Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
snake_case_ :Dict = key_name[-9:-7]
for i in range(16 ):
snake_case_ :str = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
snake_case_ :Tuple = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("""model/mlp""" ):
snake_case_ :Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/p1/bias""" ):
snake_case_ :List[Any] = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
snake_case_ :str = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/p2/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.endswith("""/p2/bias""" ):
snake_case_ :Dict = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
snake_case_ :Any = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("""model/ln""" ):
snake_case_ :Union[str, Any] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
snake_case_ :str = """model.blocks.%d.feed_forward.norm.bias""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :int = torch.tensor(_lowercase )
elif key_name.endswith("""/g""" ):
snake_case_ :Dict = """model.blocks.%d.feed_forward.norm.weight""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.startswith("""model/att""" ):
snake_case_ :List[str] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
snake_case_ :Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
snake_case_ :Dict = state[:, 0, :, :]
snake_case_ :int = state[:, 1, :, :]
snake_case_ :List[str] = state[:, 2, :, :]
snake_case_ :str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Any = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[int] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :int = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
snake_case_ :int = torch.tensor(_lowercase )
snake_case_ :Optional[Any] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
snake_case_ :Dict = torch.tensor(_lowercase )
snake_case_ :Dict = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/o/kernel""" ):
snake_case_ :str = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
snake_case_ :str = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Any = torch.tensor(_lowercase )
elif key_name.startswith("""model/an""" ):
snake_case_ :Optional[int] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
snake_case_ :Any = """model.blocks.%d.self_attn.norm.bias""" % player
snake_case_ :Optional[int] = vnp.copy() # same because it is one dimensional
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.endswith("""/g""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.self_attn.norm.weight""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
snake_case_ :List[Any] = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
snake_case_ :Optional[Any] = """model.%s.weight""" % nlayer
snake_case_ :Any = vnp.copy() # same in embedded
snake_case_ :List[Any] = torch.tensor(_lowercase )
if key_name.startswith("""model/wte""" ):
snake_case_ :Tuple = """lm_head.weight"""
snake_case_ :List[str] = vnp.copy() # same in embedded
snake_case_ :List[Any] = torch.tensor(_lowercase )
elif key_name.startswith("""model/wob""" ):
snake_case_ :str = """final_logits_bias"""
snake_case_ :Any = vnp.copy() # same in embedded
snake_case_ :List[Any] = state.reshape((1, -1) )
snake_case_ :Union[str, Any] = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
snake_case_ :str = """model.last_project.weight"""
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :int = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
snake_case_ :Optional[int] = """model.last_project.bias"""
snake_case_ :Tuple = vnp.copy() # same because it is one dimensional
snake_case_ :Any = torch.tensor(_lowercase )
    torch.save(new_state, args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
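# Typical invocation of this converter (script name and paths are
# illustrative placeholders):
#   python convert_gptsan_tf_checkpoint_to_pytorch.py \
#       --tf_model_dir ./gptsan-tf-checkpoint --output ./gptsan.pt
# The result is a plain state dict, loadable afterwards with:
#   state_dict = torch.load("./gptsan.pt", map_location="cpu")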
| 66 | 0 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    '''simple docstring'''
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_keys( s_dict ):
    '''simple docstring'''
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("""transformer_layers""" , """layers""" )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace("""subsample""" , """conv""" )] = s_dict.pop(key )
def make_linear_from_emb( emb ):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms( checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    mam_aaa = torch.load(checkpoint_path , map_location="""cpu""" )
    args = mam_aaa["""args"""]
    state_dict = mam_aaa["""model"""]
    lm_head_weights = state_dict["""decoder.output_projection.weight"""]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i ) for i in args.conv_kernel_sizes.split(""",""" )]
    config = SpeechaTextConfig(
        vocab_size=vocab_size , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , num_conv_layers=len(conv_kernel_sizes ) , conv_channels=args.conv_channels , conv_kernel_sizes=conv_kernel_sizes , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=tie_embeds , num_beams=5 , max_length=200 , use_cache=True , decoder_start_token_id=2 , early_stopping=True , )
    model = SpeechaTextForConditionalGeneration(config )
    missing, unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            f' but all the following weights are missing {missing}' )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
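# Typical invocation (paths are illustrative placeholders):
#   python convert_s2t_fairseq_to_tfms.py \
#       --fairseq_path ./s2t_checkpoint.pt --pytorch_dump_folder_path ./s2t-hf
# The dump folder can then be reloaded with
# SpeechaTextForConditionalGeneration.from_pretrained("./s2t-hf").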
| 12 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__a = pd.read_csv("sample_data.csv", header=None)
__a = df.shape[:1][0]
# If you're using some other dataset input the target column
__a = df.iloc[:, 1:2]
__a = actual_data.values.reshape(len_data, 1)
__a = MinMaxScaler().fit_transform(actual_data)
__a = 10
__a = 5
__a = 20
__a = len_data - periods * look_back
__a = actual_data[:division]
__a = actual_data[division - look_back :]
__a , __a = [], []
__a , __a = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__a = np.array(train_x)
__a = np.array(test_x)
__a = np.array([list(i.ravel()) for i in train_y])
__a = np.array([list(i.ravel()) for i in test_y])
__a = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
__a = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
__a = model.predict(x_test)
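    # A minimal way to score the forecast; values live in [0, 1] because of
    # the MinMaxScaler above, so this is an RMSE in the scaled space.
    rmse = np.sqrt(np.mean((predictions - y_test) ** 2))
    print("scaled RMSE:", rmse)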
| 66 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
processor = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def init_clap( checkpoint_path , enable_fusion=False ):
    """simple docstring"""
    model, model_cfg = create_model(
        '''HTSAT-tiny''' , '''roberta''' , checkpoint_path , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=enable_fusion , fusion_type='''aff_2d''' if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict( state_dict ):
    """simple docstring"""
    model_state_dict = {}
    sequential_layers_pattern = r""".*sequential.(\d+).*"""
    text_projection_pattern = r""".*_projection.(\d+).*"""
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(sequential_layer )//3}.linear.""" )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("""qkv""" , """query""" )] = query_layer
            model_state_dict[key.replace("""qkv""" , """key""" )] = key_layer
            model_state_dict[key.replace("""qkv""" , """value""" )] = value_layer
        else:
            model_state_dict[key] = value
return model_state_dict
def convert_clap_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    """simple docstring"""
    clap_model, clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
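# Typical invocation (paths are illustrative placeholders):
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./clap_htsat_tiny.pt --pytorch_dump_folder_path ./clap-hf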
| 313 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
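# Consumer-side the laziness is invisible; a sketch (the checkpoint name is
# an assumption, not taken from this file):
#   from transformers import AltCLIPModel, AltCLIPProcessor
#   model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")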
| 66 | 0 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer( Trainer ):
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix = "eval" ):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix = "test" ):
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , """predict""" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'''{metric_key_prefix}_''' ):
                metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
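    # A sketch of wiring this trainer up inside a QA example script; every
    # name below (model, datasets, post-processing helper, metrics fn) is
    # assumed to be defined by the surrounding script:
    #
    #   trainer = QuestionAnsweringTrainer(
    #       model=model,
    #       args=training_args,
    #       train_dataset=train_dataset,
    #       eval_dataset=eval_dataset,
    #       eval_examples=eval_examples,
    #       post_process_function=post_processing_function,
    #       compute_metrics=compute_metrics,
    #   )
    #   metrics = trainer.evaluate()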
| 287 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config( model_name, num_frames ):
    '''simple docstring'''
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("""patch""" )
    patch_size = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames )
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config )
    if "large" in model_name:
        config.projection_dim = 768
    return config
def rename_key( name ):
    '''simple docstring'''
    if name == "token_embedding.weight":
        name = name.replace("""token_embedding.weight""", """text_model.embeddings.token_embedding.weight""" )
    if name == "positional_embedding":
        name = name.replace("""positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
    if "ln_1" in name:
        name = name.replace("""ln_1""", """layer_norm1""" )
    if "ln_2" in name:
        name = name.replace("""ln_2""", """layer_norm2""" )
    if "c_fc" in name:
        name = name.replace("""c_fc""", """fc1""" )
    if "c_proj" in name:
        name = name.replace("""c_proj""", """fc2""" )
    if name.startswith("""transformer.resblocks""" ):
        name = name.replace("""transformer.resblocks""", """text_model.encoder.layers""" )
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("""attn.out_proj""", """self_attn.out_proj""" )
    if "ln_final" in name:
        name = name.replace("""ln_final""", """text_model.final_layer_norm""" )
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("""visual.class_embedding""", """vision_model.embeddings.class_embedding""" )
    if name == "visual.positional_embedding":
        name = name.replace("""visual.positional_embedding""", """vision_model.embeddings.position_embedding.weight""" )
    if name.startswith("""visual.transformer.resblocks""" ):
        name = name.replace("""visual.transformer.resblocks""", """vision_model.encoder.layers""" )
    if "visual.conv1" in name:
        name = name.replace("""visual.conv1""", """vision_model.embeddings.patch_embedding""" )
    if "visual.ln_pre" in name:
        name = name.replace("""visual.ln_pre""", """vision_model.pre_layernorm""" )
    if "visual.ln_post" in name:
        name = name.replace("""visual.ln_post""", """vision_model.post_layernorm""" )
    if "visual.proj" in name:
        name = name.replace("""visual.proj""", """visual_projection.weight""" )
    if "text_projection" in name:
        name = name.replace("""text_projection""", """text_projection.weight""" )
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("""prompts_visual_proj""", """prompts_visual_projection""" )
    if "prompts_visual_ln" in name:
        name = name.replace("""prompts_visual_ln""", """prompts_visual_layernorm""" )
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("""positional""", """position""" )
    if name.startswith("""mit.resblocks""" ):
        name = name.replace("""mit.resblocks""", """mit.encoder.layers""" )
    # prompts generator
    if name.startswith("""prompts_generator.norm""" ):
        name = name.replace("""prompts_generator.norm""", """prompts_generator.layernorm""" )
    return name
def convert_state_dict( orig_state_dict, config ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn.in_proj" in key:
            key_split = key.split(""".""" )
            if key.startswith("""visual""" ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case_ :str = val[
:dim, :
]
snake_case_ :Optional[int] = val[
dim : dim * 2, :
]
snake_case_ :Union[str, Any] = val[
-dim:, :
]
else:
snake_case_ :Dict = val[
:dim
]
snake_case_ :Optional[int] = val[
dim : dim * 2
]
snake_case_ :Optional[int] = val[
-dim:
]
else:
if "weight" in key:
snake_case_ :Optional[Any] = val[
:dim, :
]
snake_case_ :List[str] = val[
dim : dim * 2, :
]
snake_case_ :Dict = val[
-dim:, :
]
else:
snake_case_ :Union[str, Any] = val[:dim]
snake_case_ :Union[str, Any] = val[
dim : dim * 2
]
snake_case_ :Union[str, Any] = val[-dim:]
elif key.startswith("""mit""" ):
            layer_num = key_split[2]
            dim = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case_ :Optional[int] = val[:dim, :]
snake_case_ :Optional[int] = val[dim : dim * 2, :]
snake_case_ :str = val[-dim:, :]
else:
snake_case_ :str = val[:dim]
snake_case_ :Any = val[dim : dim * 2]
snake_case_ :int = val[-dim:]
else:
            layer_num = key_split[2]
            dim = config.text_config.hidden_size
if "weight" in key:
snake_case_ :Dict = val[:dim, :]
snake_case_ :Dict = val[
dim : dim * 2, :
]
snake_case_ :List[str] = val[-dim:, :]
else:
snake_case_ :Any = val[:dim]
snake_case_ :Tuple = val[
dim : dim * 2
]
snake_case_ :List[str] = val[-dim:]
else:
            new_key_name = rename_key(key )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
return orig_state_dict
def prepare_video( num_frames ):
    '''simple docstring'''
    if num_frames == 8:
        filename = """eating_spaghetti_8_frames.npy"""
    elif num_frames == 16:
        filename = """eating_spaghetti.npy"""
    elif num_frames == 32:
        filename = """eating_spaghetti_32_frames.npy"""
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""", filename=filename, repo_type="""dataset""", )
    video = np.load(file )
    return list(video )
def convert_xclip_checkpoint( model_name, pytorch_dump_folder_path=None, push_to_hub=False ):
    '''simple docstring'''
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name, num_frames )
    model = XCLIPModel(config )
    model.eval()
    if "drive" in checkpoint_url:
        output = """pytorch_model.bin"""
        gdown.cached_download(checkpoint_url, output, quiet=False )
        state_dict = torch.load(output, map_location="""cpu""" )["""model"""]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url )["""model"""]
    state_dict = convert_state_dict(state_dict, config )
    model = XCLIPModel(config )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
    image_size = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
    image_processor = VideoMAEImageProcessor(size=image_size )
    slow_tokenizer = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer )
    video = prepare_video(num_frames )
    inputs = processor(
        text=["""playing sports""", """eating spaghetti""", """go shopping"""], videos=video, return_tensors="""pt""", padding=True )
    print("""Shape of pixel values:""", inputs.pixel_values.shape )
    with torch.no_grad():
        outputs = model(**inputs )
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print("""Probs:""", probs )
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case_ :Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case_ :str = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] )
elif model_name == "xclip-base-patch16":
snake_case_ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case_ :Any = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] )
elif model_name == "xclip-large-patch14":
snake_case_ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case_ :Tuple = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case_ :Union[str, Any] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case_ :Dict = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case_ :Union[str, Any] = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case_ :str = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case_ :str = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case_ :int = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case_ :Optional[int] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case_ :Any = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case_ :Tuple = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case_ :Union[str, Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(_lowercase, _lowercase, atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
        model.push_to_hub(model_name, organization="""nielsr""" )
        processor.push_to_hub(model_name, organization="""nielsr""" )
        slow_tokenizer.push_to_hub(model_name, organization="""nielsr""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
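# Typical invocation (the dump path is an illustrative placeholder):
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 --pytorch_dump_folder_path ./xclip-hf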
| 66 | 0 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class UNet2DModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """simple docstring"""
    model_class = UNetaDModel
    main_input_name = """sample"""
    @property
    def dummy_input(self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor([10] ).to(torch_device )
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self ):
        return (3, 32, 32)
    @property
    def output_shape(self ):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            """block_out_channels""": (32, 64),
            """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
            """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
            """attention_head_dim""": 3,
            """out_channels""": 3,
            """in_channels""": 3,
            """layers_per_block""": 2,
            """sample_size""": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """simple docstring"""
    model_class = UNetaDModel
    main_input_name = """sample"""
    @property
    def dummy_input(self ):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor([10] ).to(torch_device )
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self ):
        return (4, 32, 32)
    @property
    def output_shape(self ):
        return (4, 32, 32)
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            """sample_size""": 32,
            """in_channels""": 4,
            """out_channels""": 4,
            """layers_per_block""": 2,
            """block_out_channels""": (32, 64),
            """attention_head_dim""": 32,
            """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
            """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def UpperCAmelCase_ (self ):
UpperCamelCase__ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def UpperCAmelCase_ (self ):
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
UpperCamelCase__ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=SCREAMING_SNAKE_CASE_ )
model_accelerate.to(SCREAMING_SNAKE_CASE_ )
model_accelerate.eval()
UpperCamelCase__ = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCamelCase__ = noise.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.tensor([10] * noise.shape[0] ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model_accelerate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
UpperCamelCase__ = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=SCREAMING_SNAKE_CASE_ , low_cpu_mem_usage=SCREAMING_SNAKE_CASE_ )
model_normal_load.to(SCREAMING_SNAKE_CASE_ )
model_normal_load.eval()
UpperCamelCase__ = model_normal_load(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )["""sample"""]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-3 )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCamelCase__ = noise.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.tensor([10] * noise.shape[0] ).to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase__ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCamelCase__ = torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
# fmt: on
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-3 ) )
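Before the next test class, the determinism check above is easier to follow de-obfuscated. A minimal sketch, assuming the `diffusers` package and that `UNet2DModel` is the real class behind the mangled `UNetaDModel` name:

import torch
from diffusers import UNet2DModel

# the dummy LDM UNet used by the tests above
model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
model.eval()

# seeded noise makes the output slice reproducible across runs
noise = torch.randn(
    1, model.config.in_channels, model.config.sample_size, model.config.sample_size,
    generator=torch.manual_seed(0),
)
time_step = torch.tensor([10] * noise.shape[0])

with torch.no_grad():
    sample = model(noise, time_step).sample

# the test compares a small slice of this tensor against recorded values
print(sample[0, -1, -3:, -3:].flatten())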
class __A( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = UNetaDModel
SCREAMING_SNAKE_CASE__ = """sample"""
@property
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=(32, 32) ):
UpperCamelCase__ = 4
UpperCamelCase__ = 3
UpperCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=SCREAMING_SNAKE_CASE_ )
return {"sample": noise, "timestep": time_step}
@property
def UpperCAmelCase_ (self ):
return (3, 32, 32)
@property
def UpperCAmelCase_ (self ):
return (3, 32, 32)
def UpperCAmelCase_ (self ):
UpperCamelCase__ = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1E-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
UpperCamelCase__ = self.dummy_input
return init_dict, inputs_dict
@slow
def UpperCAmelCase_ (self ):
UpperCamelCase__ = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.dummy_input
UpperCamelCase__ = floats_tensor((4, 3) + (2_56, 2_56) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = noise
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ )
assert image is not None, "Make sure output is not None"
@slow
def UpperCAmelCase_ (self ):
UpperCamelCase__ = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = 4
UpperCamelCase__ = 3
UpperCamelCase__ = (2_56, 2_56)
UpperCamelCase__ = torch.ones((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.tensor(batch_size * [1E-4] ).to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCamelCase__ = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
# fmt: on
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-2 ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = 4
UpperCamelCase__ = 3
UpperCamelCase__ = (32, 32)
UpperCamelCase__ = torch.ones((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.tensor(batch_size * [1E-4] ).to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCamelCase__ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-2 ) )
def UpperCAmelCase_ (self ):
# not required for this model
pass
| 244 |
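A detail the mangled names hide in the tests above: with `output_loading_info=True`, `from_pretrained` returns a `(model, loading_info)` tuple, and the obfuscation has collapsed the two-target assignment into a single name. A minimal sketch of the intended pattern, assuming the `diffusers` API used by the tests:

from diffusers import UNet2DModel

# from_pretrained yields the model plus a dict describing unmatched weights
model, loading_info = UNet2DModel.from_pretrained(
    "fusing/unet-ldm-dummy-update", output_loading_info=True
)
assert loading_info["missing_keys"] == []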
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self: List[Any] , snake_case: List[str] , snake_case: Optional[Any]=13 , snake_case: List[str]=7 , snake_case: Dict=True , snake_case: List[str]=True , snake_case: Optional[int]=True , snake_case: Any=True , snake_case: Optional[Any]=99 , snake_case: Tuple=32 , snake_case: Tuple=5 , snake_case: Dict=4 , snake_case: Optional[Any]=37 , snake_case: Union[str, Any]="gelu" , snake_case: Tuple=0.1 , snake_case: List[Any]=0.1 , snake_case: List[str]=512 , snake_case: Optional[int]=16 , snake_case: int=2 , snake_case: List[Any]=0.0_2 , snake_case: Union[str, Any]=4 , ) -> List[str]:
snake_case_ :Dict = parent
snake_case_ :Any = batch_size
snake_case_ :Any = seq_length
snake_case_ :List[str] = is_training
snake_case_ :Optional[Any] = use_attention_mask
snake_case_ :Dict = use_token_type_ids
snake_case_ :Union[str, Any] = use_labels
snake_case_ :str = vocab_size
snake_case_ :int = hidden_size
snake_case_ :List[str] = num_hidden_layers
snake_case_ :Dict = num_attention_heads
snake_case_ :Any = intermediate_size
snake_case_ :Tuple = hidden_act
snake_case_ :int = hidden_dropout_prob
snake_case_ :Optional[Any] = attention_probs_dropout_prob
snake_case_ :Any = max_position_embeddings
snake_case_ :Union[str, Any] = type_vocab_size
snake_case_ :Optional[int] = type_sequence_label_size
snake_case_ :Union[str, Any] = initializer_range
snake_case_ :Tuple = num_choices
def lowerCAmelCase_ ( self: Tuple ) -> str:
snake_case_ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ :Union[str, Any] = None
if self.use_attention_mask:
snake_case_ :str = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ :Any = None
if self.use_token_type_ids:
snake_case_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ :int = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase_ ( self: Optional[int] ) -> int:
snake_case_ :str = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_, snake_case_ :Optional[int] = config_and_inputs
snake_case_ :Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self: Optional[Any] ) -> Any:
snake_case_ :int = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_, snake_case_ :Dict = config_and_inputs
snake_case_ :Union[str, Any] = True
snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = True
_A : Dict = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase_ ( self: int ) -> List[str]:
snake_case_ :Any = FlaxBertModelTester(self )
@slow
def lowerCAmelCase_ ( self: List[str] ) -> Dict:
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
snake_case_ :Dict = FlaxBertModel.from_pretrained("""bert-base-cased""" )
snake_case_ :Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case )
| 66 | 0 |
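The slow test above reduces to a one-batch smoke test. A minimal sketch, assuming `flax` and `jax` are installed:

import numpy as np
from transformers import FlaxBertModel

model = FlaxBertModel.from_pretrained("bert-base-cased")
# a single dummy token id is enough to exercise the forward pass
outputs = model(np.ones((1, 1), dtype="i4"))
assert outputs.last_hidden_state is not None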
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
lowerCamelCase_ : Optional[Any] = logging.getLogger(__name__)
class __A ( _lowerCAmelCase ):
"""simple docstring"""
__lowerCAmelCase = """token-classification"""
def __init__( self , __A ) -> List[Any]:
if type(__A ) == dict:
a =Namespace(**__A )
a =import_module('''tasks''' )
try:
a =getattr(__A , hparams.task_type )
a =token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
a =self.token_classification_task.get_labels(hparams.labels )
a =CrossEntropyLoss().ignore_index
super().__init__(__A , len(self.labels ) , self.mode )
def SCREAMING_SNAKE_CASE ( self , **__A ) -> Any:
return self.model(**__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> Optional[int]:
a ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
a =(
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don't use token_type_ids
a =self(**__A )
a =outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =self.hparams
for mode in ["train", "dev", "test"]:
a =self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __A )
a =torch.load(__A )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
a =self.token_classification_task.read_examples_from_file(args.data_dir , __A )
a =self.token_classification_task.convert_examples_to_features(
__A , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__A , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __A )
torch.save(__A , __A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = False ) -> DataLoader:
a =self._feature_file(__A )
logger.info('''Loading features from cached file %s''' , __A )
a =torch.load(__A )
a =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
a =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
a =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
a =torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
a =torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> List[str]:
"""Compute validation""" ""
a ={"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
a =(
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don't use token_type_ids
a =self(**__A )
a =outputs[:2]
a =logits.detach().cpu().numpy()
a =inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def SCREAMING_SNAKE_CASE ( self , __A ) -> Tuple:
a =torch.stack([x['''val_loss'''] for x in outputs] ).mean()
a =np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
a =np.argmax(__A , axis=2 )
a =np.concatenate([x['''target'''] for x in outputs] , axis=0 )
a =dict(enumerate(self.labels ) )
a =[[] for _ in range(out_label_ids.shape[0] )]
a =[[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
a ={
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(__A , __A ),
"""precision""": precision_score(__A , __A ),
"""recall""": recall_score(__A , __A ),
"""f1""": fa_score(__A , __A ),
}
a =dict(results.items() )
a =results
return ret, preds_list, out_label_list
def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[Any]:
# when stable
a =self._eval_end(__A )
a =ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def SCREAMING_SNAKE_CASE ( self , __A ) -> Any:
# updating to test_epoch_end instead of deprecated test_end
a =self._eval_end(__A )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
a =ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def SCREAMING_SNAKE_CASE ( __A , __A ) -> Dict:
# Add NER specific options
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__A , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__A , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
lowerCamelCase_ : List[Any] = NERTransformer.add_model_specific_args(parser, os.getcwd())
lowerCamelCase_ : Any = parser.parse_args()
lowerCamelCase_ : Optional[Any] = NERTransformer(args)
lowerCamelCase_ : int = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
lowerCamelCase_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
lowerCamelCase_ : Tuple = model.load_from_checkpoint(checkpoints[-1])
    trainer.test(model)
| 81 |
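The core of `_eval_end` above is the realignment of predictions and gold labels: every position whose gold label equals the ignore index is dropped before scoring. A toy illustration (the tag set here is hypothetical):

import numpy as np
from torch.nn import CrossEntropyLoss

pad_token_label_id = CrossEntropyLoss().ignore_index  # -100
label_map = {0: "O", 1: "B-PER", 2: "I-PER"}  # made-up labels for illustration

out_label_ids = np.array([[0, 1, pad_token_label_id]])
preds = np.array([[0, 1, 2]])

gold, hyp = [], []
for i in range(out_label_ids.shape[0]):
    for j in range(out_label_ids.shape[1]):
        if out_label_ids[i, j] != pad_token_label_id:
            gold.append(label_map[out_label_ids[i, j]])
            hyp.append(label_map[preds[i, j]])

print(gold, hyp)  # ['O', 'B-PER'] ['O', 'B-PER']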
"""simple docstring"""
import math
class lowerCamelCase :
'''simple docstring'''
    def lowerCAmelCase_ ( self: Tuple , weights: list[list[float]] , sample: list[int] ) -> int:
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if da < db else 1  # the closer weight vector wins
    def lowerCAmelCase_ ( self: Optional[int] , weights: list[list[int | float]] , sample: list[int] , j: int , alpha: float ) -> list[list[int | float]]:
        for i in range(len(sample ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def A_ ( ):
'''simple docstring'''
snake_case_ :Dict = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
snake_case_ :List[Any] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
snake_case_ :Optional[Any] = SelfOrganizingMap()
snake_case_ :Dict = 3
snake_case_ :Dict = 0.5
for _ in range(_lowercase ):
for j in range(len(_lowercase ) ):
# training sample
snake_case_ :List[Any] = training_samples[j]
# Compute the winning vector
snake_case_ :Optional[int] = self_organizing_map.get_winner(_lowercase, _lowercase )
# Update the winning vector
snake_case_ :List[str] = self_organizing_map.update(_lowercase, _lowercase, _lowercase, _lowercase )
# classify test sample
snake_case_ :str = [0, 0, 0, 1]
snake_case_ :List[Any] = self_organizing_map.get_winner(_lowercase, _lowercase )
# results
print(f"""Clusters that the test sample belongs to : {winner}""" )
print(f"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
| 66 | 0 |
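For readability, here is the same two-cluster self-organizing map restated with plain names: the winner is the weight vector closest to the sample, and the update pulls that vector toward the sample by a factor `alpha`. A sketch using the snippet's own data:

import math

def get_winner(weights: list[list[float]], sample: list[int]) -> int:
    d0 = sum(math.pow(sample[i] - weights[0][i], 2) for i in range(len(sample)))
    d1 = sum(math.pow(sample[i] - weights[1][i], 2) for i in range(len(sample)))
    return 0 if d0 < d1 else 1  # the closer weight vector wins

def update(weights, sample, j, alpha):
    for i in range(len(sample)):
        weights[j][i] += alpha * (sample[i] - weights[j][i])
    return weights

weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
for _ in range(3):  # epochs
    for sample in [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]:
        j = get_winner(weights, sample)
        weights = update(weights, sample, j, alpha=0.5)

print(get_winner(weights, [0, 0, 0, 1]))  # classify a test sample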
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[str] ) -> Union[str, Any]:
_a : Any =SwinConfig()
_a : List[str] =swin_name.split("""_""" )
_a : Optional[Any] =name_split[1]
_a : Union[str, Any] =int(name_split[4] )
_a : List[Any] =int(name_split[3][-1] )
if model_size == "tiny":
_a : Union[str, Any] =96
_a : Dict =(2, 2, 6, 2)
_a : List[str] =(3, 6, 12, 24)
elif model_size == "small":
_a : str =96
_a : List[str] =(2, 2, 18, 2)
_a : Optional[Any] =(3, 6, 12, 24)
elif model_size == "base":
_a : str =128
_a : List[Any] =(2, 2, 18, 2)
_a : Union[str, Any] =(4, 8, 16, 32)
else:
_a : str =192
_a : Union[str, Any] =(2, 2, 18, 2)
_a : Optional[int] =(6, 12, 24, 48)
if "in22k" in swin_name:
_a : Union[str, Any] =21841
else:
_a : List[Any] =1000
_a : Any ="""huggingface/label-files"""
_a : Dict ="""imagenet-1k-id2label.json"""
_a : List[Any] =json.load(open(hf_hub_download(_lowercase ,_lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_a : Optional[int] ={int(_lowercase ): v for k, v in idalabel.items()}
_a : List[Any] =idalabel
_a : Any ={v: k for k, v in idalabel.items()}
_a : List[str] =img_size
_a : Optional[Any] =num_classes
_a : List[Any] =embed_dim
_a : Dict =depths
_a : List[Any] =num_heads
_a : List[str] =window_size
return config
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[str] ) -> Any:
if "patch_embed.proj" in name:
_a : Union[str, Any] =name.replace("""patch_embed.proj""" ,"""embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
_a : Tuple =name.replace("""patch_embed.norm""" ,"""embeddings.norm""" )
if "layers" in name:
_a : List[Any] ="""encoder.""" + name
if "attn.proj" in name:
_a : Dict =name.replace("""attn.proj""" ,"""attention.output.dense""" )
if "attn" in name:
_a : int =name.replace("""attn""" ,"""attention.self""" )
if "norm1" in name:
_a : Dict =name.replace("""norm1""" ,"""layernorm_before""" )
if "norm2" in name:
_a : Optional[int] =name.replace("""norm2""" ,"""layernorm_after""" )
if "mlp.fc1" in name:
_a : Any =name.replace("""mlp.fc1""" ,"""intermediate.dense""" )
if "mlp.fc2" in name:
_a : List[str] =name.replace("""mlp.fc2""" ,"""output.dense""" )
if name == "norm.weight":
_a : List[str] ="""layernorm.weight"""
if name == "norm.bias":
_a : Union[str, Any] ="""layernorm.bias"""
if "head" in name:
_a : Dict =name.replace("""head""" ,"""classifier""" )
else:
_a : Dict ="""swin.""" + name
return name
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : List[str] ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
_a : List[Any] =orig_state_dict.pop(_lowercase )
if "mask" in key:
continue
elif "qkv" in key:
_a : Any =key.split(""".""" )
_a : int =int(key_split[1] )
_a : str =int(key_split[3] )
_a : Dict =model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_a : int =val[:dim, :]
_a : Any =val[
dim : dim * 2, :
]
_a : List[Any] =val[-dim:, :]
else:
_a : Any =val[
:dim
]
_a : str =val[
dim : dim * 2
]
_a : Any =val[
-dim:
]
else:
_a : Optional[Any] =val
return orig_state_dict
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : List[Any] ) -> Union[str, Any]:
_a : Optional[int] =timm.create_model(_lowercase ,pretrained=_lowercase )
timm_model.eval()
_a : int =get_swin_config(_lowercase )
_a : Union[str, Any] =SwinForImageClassification(_lowercase )
model.eval()
_a : List[str] =convert_state_dict(timm_model.state_dict() ,_lowercase )
model.load_state_dict(_lowercase )
_a : Tuple ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
_a : List[str] =AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" ,"""-""" ) ) )
_a : str =Image.open(requests.get(_lowercase ,stream=_lowercase ).raw )
_a : Optional[int] =image_processor(images=_lowercase ,return_tensors="""pt""" )
_a : List[Any] =timm_model(inputs["""pixel_values"""] )
_a : List[Any] =model(**_lowercase ).logits
assert torch.allclose(_lowercase ,_lowercase ,atol=1e-3 )
print(F"Saving model {swin_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowercase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
A__: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A__: List[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 276 |
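The only non-mechanical step in `convert_state_dict` above is splitting timm's fused qkv projection into separate query, key and value tensors. A toy illustration of the slicing, where `dim` stands for the per-layer attention hidden size:

import torch

dim = 4
# timm stacks q, k and v row-wise in one (3 * dim, dim) matrix
fused_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

q_weight = fused_weight[:dim, :]
k_weight = fused_weight[dim : dim * 2, :]
v_weight = fused_weight[-dim:, :]

# the three slices partition the fused matrix exactly
assert torch.equal(torch.cat([q_weight, k_weight, v_weight], dim=0), fused_weight)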
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Optional[int] , snake_case: Any , snake_case: Optional[Any]=13 , snake_case: Tuple=32 , snake_case: Optional[int]=2 , snake_case: Tuple=3 , snake_case: Tuple=16 , snake_case: Optional[Any]=[1, 2, 1] , snake_case: Optional[int]=[2, 2, 4] , snake_case: Optional[int]=2 , snake_case: int=2.0 , snake_case: Union[str, Any]=True , snake_case: List[str]=0.0 , snake_case: List[Any]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[Any]="gelu" , snake_case: Optional[int]=False , snake_case: Union[str, Any]=True , snake_case: Union[str, Any]=0.0_2 , snake_case: Optional[int]=1E-5 , snake_case: Optional[Any]=True , snake_case: List[Any]=None , snake_case: List[Any]=True , snake_case: Optional[Any]=10 , snake_case: str=8 , ) -> Tuple:
snake_case_ :Dict = parent
snake_case_ :Any = batch_size
snake_case_ :List[Any] = image_size
snake_case_ :List[Any] = patch_size
snake_case_ :int = num_channels
snake_case_ :Tuple = embed_dim
snake_case_ :str = depths
snake_case_ :str = num_heads
snake_case_ :Optional[int] = window_size
snake_case_ :Tuple = mlp_ratio
snake_case_ :Any = qkv_bias
snake_case_ :List[Any] = hidden_dropout_prob
snake_case_ :Optional[Any] = attention_probs_dropout_prob
snake_case_ :Union[str, Any] = drop_path_rate
snake_case_ :Any = hidden_act
snake_case_ :Optional[Any] = use_absolute_embeddings
snake_case_ :Union[str, Any] = patch_norm
snake_case_ :Dict = layer_norm_eps
snake_case_ :str = initializer_range
snake_case_ :Tuple = is_training
snake_case_ :Tuple = scope
snake_case_ :Union[str, Any] = use_labels
snake_case_ :Optional[Any] = type_sequence_label_size
snake_case_ :Dict = encoder_stride
def lowerCAmelCase_ ( self: int ) -> int:
snake_case_ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ :Any = None
if self.use_labels:
snake_case_ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ :int = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self: str ) -> Union[str, Any]:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCAmelCase_ ( self: str , snake_case: Optional[int] , snake_case: Dict , snake_case: str ) -> List[Any]:
snake_case_ :Union[str, Any] = SwinvaModel(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Optional[int] = model(snake_case )
snake_case_ :Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case_ :int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: Tuple , snake_case: int ) -> Any:
snake_case_ :Dict = SwinvaForMaskedImageModeling(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Tuple = model(snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ :List[Any] = 1
snake_case_ :int = SwinvaForMaskedImageModeling(snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ :int = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase_ ( self: List[Any] , snake_case: Any , snake_case: List[str] , snake_case: Union[str, Any] ) -> Tuple:
snake_case_ :int = self.type_sequence_label_size
snake_case_ :List[Any] = SwinvaForImageClassification(snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Dict = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self: int ) -> str:
snake_case_ :Any = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_ :List[str] = config_and_inputs
snake_case_ :List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_A : Any = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
_A : List[Any] = False
_A : List[str] = False
_A : Tuple = False
_A : List[str] = False
def lowerCAmelCase_ ( self: Dict ) -> List[Any]:
snake_case_ :Optional[int] = SwinvaModelTester(self )
snake_case_ :List[str] = ConfigTester(self , config_class=snake_case , embed_dim=37 )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple:
snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> str:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def lowerCAmelCase_ ( self: int ) -> Dict:
pass
def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]:
snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :Optional[int] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ :List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def lowerCAmelCase_ ( self: Dict ) -> Optional[int]:
snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :Optional[int] = model_class(snake_case )
snake_case_ :List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ :int = [*signature.parameters.keys()]
snake_case_ :List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]:
snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :List[str] = True
for model_class in self.all_model_classes:
snake_case_ :List[Any] = True
snake_case_ :Any = False
snake_case_ :Optional[int] = True
snake_case_ :Tuple = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Any = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :str = outputs.attentions
snake_case_ :Dict = len(self.model_tester.depths )
self.assertEqual(len(snake_case ) , snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ :Union[str, Any] = True
snake_case_ :Tuple = config.window_size**2
snake_case_ :Any = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :int = outputs.attentions
self.assertEqual(len(snake_case ) , snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
snake_case_ :Any = len(snake_case )
# Check attention is always last and order is fine
snake_case_ :int = True
snake_case_ :Dict = True
snake_case_ :Optional[int] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Dict = model(**self._prepare_for_class(snake_case , snake_case ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
snake_case_ :Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
snake_case_ :int = 2
self.assertEqual(out_len + added_hidden_states , len(snake_case ) )
snake_case_ :str = outputs.attentions
self.assertEqual(len(snake_case ) , snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def lowerCAmelCase_ ( self: int , snake_case: Dict , snake_case: Dict , snake_case: Optional[Any] , snake_case: Dict ) -> List[str]:
snake_case_ :Dict = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Optional[int] = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :str = outputs.hidden_states
snake_case_ :List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case ) , snake_case )
# Swinv2 has a different seq_length
snake_case_ :List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ :Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
snake_case_ :str = outputs.reshaped_hidden_states
self.assertEqual(len(snake_case ) , snake_case )
snake_case_, snake_case_, snake_case_, snake_case_ :Any = reshaped_hidden_states[0].shape
snake_case_ :int = (
reshaped_hidden_states[0].view(snake_case , snake_case , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase_ ( self: Any ) -> Any:
snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case_ :Union[str, Any] = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :List[str] = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
def lowerCAmelCase_ ( self: Tuple ) -> Any:
snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Optional[int] = 3
snake_case_ :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case_ :str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ :Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case_ :int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case_ :str = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :Tuple = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
def lowerCAmelCase_ ( self: Any ) -> Tuple:
snake_case_ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case )
def lowerCAmelCase_ ( self: Optional[int] ) -> Dict:
snake_case_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@slow
def lowerCAmelCase_ ( self: List[Any] ) -> Dict:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ :List[str] = SwinvaModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]:
snake_case_, snake_case_ :str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Optional[int] = _config_zero_init(snake_case )
for model_class in self.all_model_classes:
snake_case_ :Tuple = model_class(config=snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self: List[str] ) -> List[str]:
snake_case_ :Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
snake_case )
snake_case_ :str = self.default_image_processor
snake_case_ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case_ :str = image_processor(images=snake_case , return_tensors="""pt""" ).to(snake_case )
# forward pass
with torch.no_grad():
snake_case_ :Tuple = model(**snake_case )
# verify the logits
snake_case_ :Dict = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case )
snake_case_ :int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
| 66 | 0 |
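The integration test above corresponds to the following plain inference path. A minimal sketch, assuming `Swinv2ForImageClassification` is the real class behind the mangled `Swinva...` names, and reusing the checkpoint and fixture image from the test:

import torch
from PIL import Image
from transformers import AutoImageProcessor, Swinv2ForImageClassification

checkpoint = "microsoft/swinv2-tiny-patch4-window8-256"
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = Swinv2ForImageClassification.from_pretrained(checkpoint)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)

print(model.config.id2label[logits.argmax(-1).item()])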
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __UpperCAmelCase ( string1: str, string2: str ):
    lista = list(string1 )
    listb = list(string2 )
    count = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = """_"""
    if count > 1:
        return False
    else:
        return "".join(lista )
def __UpperCAmelCase ( a_: Tuple ):
_UpperCAmelCase : Optional[Any] = []
while True:
_UpperCAmelCase : Union[str, Any] = ["""$"""] * len(_lowercase )
_UpperCAmelCase : Union[str, Any] = []
for i in range(len(_lowercase ) ):
for j in range(i + 1, len(_lowercase ) ):
_UpperCAmelCase : List[Any] = compare_string(binary[i], binary[j] )
                if k is not False:
                    _UpperCAmelCase : Union[str, Any] = """*"""
                    _UpperCAmelCase : Any = """*"""
                    temp.append(k )
for i in range(len(_lowercase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowercase ) == 0:
return pi
_UpperCAmelCase : str = list(set(_lowercase ) )
def __UpperCAmelCase ( a_: int, a_: Union[str, Any] ):
_UpperCAmelCase : Any = []
for minterm in minterms:
_UpperCAmelCase : List[Any] = """"""
for _ in range(_lowercase ):
_UpperCAmelCase : List[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowercase )
return temp
def __UpperCAmelCase ( string1: str, string2: str, count: int ):
    lista = list(string1 )
    listb = list(string2 )
    count_n = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def __UpperCAmelCase ( a_: str, a_: Any ):
_UpperCAmelCase : int = []
_UpperCAmelCase : Dict = [0] * len(_lowercase )
for i in range(len(chart[0] ) ):
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : str = -1
for j in range(len(_lowercase ) ):
if chart[j][i] == 1:
count += 1
_UpperCAmelCase : List[str] = j
if count == 1:
_UpperCAmelCase : Tuple = 1
for i in range(len(_lowercase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowercase ) ):
_UpperCAmelCase : List[str] = 0
temp.append(prime_implicants[i] )
while True:
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : str = -1
_UpperCAmelCase : int = 0
for i in range(len(_lowercase ) ):
_UpperCAmelCase : Tuple = chart[i].count(1 )
if count_n > max_n:
_UpperCAmelCase : List[str] = count_n
_UpperCAmelCase : Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowercase ) ):
_UpperCAmelCase : Union[str, Any] = 0
def __UpperCAmelCase ( a_: Dict, a_: Optional[Any] ):
_UpperCAmelCase : Any = [[0 for x in range(len(_lowercase ) )] for x in range(len(_lowercase ) )]
for i in range(len(_lowercase ) ):
_UpperCAmelCase : List[Any] = prime_implicants[i].count("_" )
for j in range(len(_lowercase ) ):
if is_for_table(prime_implicants[i], binary[j], _lowercase ):
_UpperCAmelCase : Union[str, Any] = 1
return chart
def __UpperCAmelCase ( ):
_UpperCAmelCase : Optional[int] = int(input("Enter the no. of variables\n" ) )
_UpperCAmelCase : int = [
        int(_lowercase )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
_UpperCAmelCase : Union[str, Any] = decimal_to_binary(_lowercase, _lowercase )
_UpperCAmelCase : str = check(_lowercase )
print("Prime Implicants are:" )
print(_lowercase )
_UpperCAmelCase : Any = prime_implicant_chart(_lowercase, _lowercase )
_UpperCAmelCase : Optional[Any] = selection(_lowercase, _lowercase )
print("Essential Prime Implicants are:" )
print(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
| 145 |
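Stripped of the mangled names, the merge step at the heart of `compare_string` and `check` above is: two implicants combine if and only if they differ in exactly one bit, and that bit becomes a don't-care. A small restatement with usage checks:

def combine(a: str, b: str):
    """Merge two implicants differing in exactly one bit, else return False."""
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff) != 1:
        return False
    i = diff[0]
    return a[:i] + "_" + a[i + 1 :]

assert combine("0110", "0100") == "01_0"
assert combine("0110", "1001") is False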
"""simple docstring"""
import re
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Optional[int] = re.compile(
r"""^(?:0|94|\+94|0{2}94)""" r"""7(0|1|2|4|5|6|7|8)""" r"""(-| |)""" r"""\d{7}$""" )
return bool(re.search(_lowercase, _lowercase ) )
if __name__ == "__main__":
__a = "0094702343221"
print(is_sri_lankan_phone_number(phone))
| 66 | 0 |
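A few concrete cases make the pattern above easier to read: the first group matches the accepted country-code prefixes, the mandatory `7` plus one digit from `(0|1|2|4|5|6|7|8)` starts the mobile number, then an optional separator and seven digits follow.

import re

phone_re = re.compile(
    r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
)

assert phone_re.search("0094702343221")   # 0094 prefix, no separator
assert phone_re.search("+9477-1234567")   # +94 prefix, "-" separator
assert not phone_re.search("0731234567")  # 3 is not a valid second digit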
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__A : Tuple = logging.get_logger(__name__)
__A : Dict = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class _a ( _lowerCAmelCase):
"""simple docstring"""
def __init__( self : List[str] , __UpperCamelCase : int=None , __UpperCamelCase : str=None , *__UpperCamelCase : Any , **__UpperCamelCase : List[Any] )->Optional[int]:
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
if config is None:
assert isinstance(self.model , __UpperCamelCase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
_UpperCAmelCase = self.model.config
else:
_UpperCAmelCase = config
_UpperCAmelCase = data_args
_UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __UpperCamelCase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
                ''' padding.''' )
if self.args.label_smoothing == 0:
_UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase = label_smoothed_nll_loss
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : int )->str:
if self.optimizer is None:
_UpperCAmelCase = ["""bias""", """LayerNorm.weight"""]
_UpperCAmelCase = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_UpperCAmelCase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase = Adafactor
_UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False}
else:
_UpperCAmelCase = AdamW
_UpperCAmelCase = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_UpperCAmelCase = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase = OSS(
params=__UpperCamelCase , optim=__UpperCamelCase , **__UpperCamelCase , )
else:
_UpperCAmelCase = optimizer_cls(__UpperCamelCase , **__UpperCamelCase )
if self.lr_scheduler is None:
_UpperCAmelCase = self._get_lr_scheduler(__UpperCamelCase )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def lowercase__ ( self : Tuple , __UpperCamelCase : Optional[Any] )->str:
_UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__UpperCamelCase )
return scheduler
def lowercase__ ( self : Optional[Any] )->Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowercase__ ( self : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] )->Optional[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase = model(**__UpperCamelCase , use_cache=__UpperCamelCase )[0]
_UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_UpperCAmelCase = model(**__UpperCamelCase , labels=__UpperCamelCase , use_cache=__UpperCamelCase )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase = model(**__UpperCamelCase , use_cache=__UpperCamelCase )[0]
_UpperCAmelCase = torch.nn.functional.log_softmax(__UpperCamelCase , dim=-1 )
_UpperCAmelCase = self.loss_fn(__UpperCamelCase , __UpperCamelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowercase__ ( self : str , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] )->List[Any]:
_UpperCAmelCase = inputs.pop('''labels''' )
_UpperCAmelCase = self._compute_loss(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return loss
def lowercase__ ( self : List[Any] , __UpperCamelCase : nn.Module , __UpperCamelCase : Dict[str, Union[torch.Tensor, Any]] , __UpperCamelCase : bool , __UpperCamelCase : Optional[List[str]] = None , )->Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
_UpperCAmelCase = self._prepare_inputs(__UpperCamelCase )
_UpperCAmelCase = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase = self.model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **__UpperCamelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__UpperCamelCase , gen_kwargs['''max_length'''] )
_UpperCAmelCase = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase = self._compute_loss(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase = self._pad_tensors_to_max_len(__UpperCamelCase , gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def lowercase__ ( self : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] )->int:
# If PAD token is not defined at least EOS token has to be defined
_UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
F' padded to `max_length`={max_length}' )
_UpperCAmelCase = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase = tensor
return padded_tensor
| 260 |
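The trainer above dynamically imports `label_smoothed_nll_loss` from a local `utils` module. The following is a minimal sketch of what such a helper conventionally computes (fairseq-style smoothing over log-probabilities), offered as an illustration rather than the project's exact implementation:

import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    # interpolate the NLL loss with a uniform prior over the vocabulary
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss

# usage: lprobs are log-softmaxed logits of shape (batch, seq, vocab)
lprobs = torch.log_softmax(torch.randn(2, 3, 5), dim=-1)
target = torch.randint(0, 5, (2, 3))
loss, nll = label_smoothed_nll_loss(lprobs, target, epsilon=0.1)
print(loss.item(), nll.item())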
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__a = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A_ ( _lowercase ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
if args.student_type == "roberta":
snake_case_ :Tuple = False
elif args.student_type == "gpt2":
snake_case_ :Union[str, Any] = False
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
if args.student_type == "roberta":
snake_case_ :List[str] = False
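Further down, `main()` turns raw token counts into MLM masking weights: frequencies are raised to the power `-mlm_smoothing`, so rarer tokens are masked proportionally more often (the word2vec/XLM trick). A toy illustration with made-up counts:

import numpy as np
import torch

mlm_smoothing = 0.7                             # default from the argparser in main() below
counts = np.array([1_000_000, 10_000, 100, 1])  # hypothetical token frequencies
token_probs = np.maximum(counts, 1) ** -mlm_smoothing
token_probs = torch.from_numpy(token_probs)

print(token_probs / token_probs.sum())  # rarer tokens get larger masking weight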
def A_ ( ):
'''simple docstring'''
snake_case_ :Union[str, Any] = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""", action="""store_true""", help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""", type=_lowercase, required=_lowercase, help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""", type=_lowercase, required=_lowercase, help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""", )
parser.add_argument(
"""--student_type""", type=_lowercase, choices=["""distilbert""", """roberta""", """gpt2"""], required=_lowercase, help="""The student type (DistilBERT, RoBERTa).""", )
parser.add_argument("""--student_config""", type=_lowercase, required=_lowercase, help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""", default=_lowercase, type=_lowercase, help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""", choices=["""bert""", """roberta""", """gpt2"""], required=_lowercase, help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""", type=_lowercase, required=_lowercase, help="""The teacher model.""" )
parser.add_argument("""--temperature""", default=2.0, type=_lowercase, help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""", default=0.5, type=_lowercase, help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""", default=0.0, type=_lowercase, help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""", )
parser.add_argument("""--alpha_clm""", default=0.5, type=_lowercase, help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""", default=0.0, type=_lowercase, help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""", default=0.0, type=_lowercase, help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""", action="""store_true""", help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""", default=0.15, type=_lowercase, help="""Proportion of tokens for which we need to make a prediction.""", )
parser.add_argument("""--word_mask""", default=0.8, type=_lowercase, help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""", default=0.1, type=_lowercase, help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""", default=0.1, type=_lowercase, help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""", default=0.7, type=_lowercase, help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""", )
parser.add_argument("""--token_counts""", type=_lowercase, help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""", action="""store_true""", help="""If true, compute the distillation loss only the [MLM] prediction distribution.""", )
parser.add_argument(
"""--freeze_pos_embs""", action="""store_true""", help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""", )
parser.add_argument(
"""--freeze_token_type_embds""", action="""store_true""", help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""", )
parser.add_argument("""--n_epoch""", type=_lowercase, default=3, help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""", type=_lowercase, default=5, help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""", action="""store_false""", help="""If true, group sequences that have similar length into the same batch. Default is true.""", )
parser.add_argument(
"""--gradient_accumulation_steps""", type=_lowercase, default=50, help="""Gradient accumulation for larger training batches.""", )
parser.add_argument("""--warmup_prop""", default=0.05, type=_lowercase, help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""", default=0.0, type=_lowercase, help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""", default=5e-4, type=_lowercase, help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""", default=1e-6, type=_lowercase, help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""", default=5.0, type=_lowercase, help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""", default=0.02, type=_lowercase, help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""", )
parser.add_argument(
"""--fp16_opt_level""", type=_lowercase, default="""O1""", help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
), )
parser.add_argument("""--n_gpu""", type=_lowercase, default=1, help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""", type=_lowercase, default=-1, help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""", type=_lowercase, default=56, help="""Random seed""" )
parser.add_argument("""--log_interval""", type=_lowercase, default=500, help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""", type=_lowercase, default=4000, help="""Checkpoint interval.""" )
snake_case_ :Tuple = parser.parse_args()
sanity_checks(_lowercase )
# ARGS #
init_gpu_params(_lowercase )
set_seed(_lowercase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path, """parameters.json""" ), """w""" ) as f:
json.dump(vars(_lowercase ), _lowercase, indent=4 )
git_log(args.dump_path )
snake_case_, snake_case_, snake_case_ :Any = MODEL_CLASSES[args.student_type]
snake_case_, snake_case_, snake_case_ :int = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
snake_case_ :Any = teacher_tokenizer_class.from_pretrained(args.teacher_name )
snake_case_ :Optional[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
snake_case_ :Union[str, Any] = tokenizer.all_special_tokens.index(_lowercase )
snake_case_ :Union[str, Any] = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
snake_case_ :str = special_tok_ids
snake_case_ :Any = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file, """rb""" ) as fp:
snake_case_ :str = pickle.load(_lowercase )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts, """rb""" ) as fp:
snake_case_ :Optional[Any] = pickle.load(_lowercase )
snake_case_ :Tuple = np.maximum(_lowercase, 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
snake_case_ :Optional[int] = 0.0 # do not predict special tokens
snake_case_ :int = torch.from_numpy(_lowercase )
else:
snake_case_ :List[str] = None
snake_case_ :Optional[int] = LmSeqsDataset(params=_lowercase, data=_lowercase )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
snake_case_ :List[Any] = student_config_class.from_pretrained(args.student_config )
snake_case_ :Union[str, Any] = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
snake_case_ :List[str] = student_model_class.from_pretrained(args.student_pretrained_weights, config=_lowercase )
else:
snake_case_ :Optional[int] = student_model_class(_lowercase )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info("""Student loaded.""" )
# TEACHER #
snake_case_ :Dict = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=_lowercase )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(_lowercase, _lowercase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(_lowercase, _lowercase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
snake_case_ :Optional[int] = Distiller(
params=_lowercase, dataset=_lowercase, token_probs=_lowercase, student=_lowercase, teacher=_lowercase )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
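# A minimal sketch of the temperature-scaled distillation loss that the
# `--temperature` and `--alpha_ce` flags above control. This illustrates the
# standard soft-target technique, not the Distiller class's exact
# implementation; the tensor names and shapes are assumed for the example.
import torch
import torch.nn.functional as F

def distillation_ce_loss(student_logits, teacher_logits, temperature=2.0):
    # Soften both distributions with the temperature, then take
    # KL(teacher || student); the T**2 factor keeps gradient magnitudes
    # comparable across temperatures.
    log_p_student = F.log_softmax(student_logits / temperature, dim=-1)
    p_teacher = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(log_p_student, p_teacher, reduction="batchmean") * (temperature**2)

# e.g. on dummy logits of shape (batch, vocab):
loss = distillation_ce_loss(torch.randn(4, 100), torch.randn(4, 100))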
| 66 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
_UpperCAmelCase : Any =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=512,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[int]:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
_UpperCAmelCase : Optional[Any] =parser.parse_args()
_UpperCAmelCase : Any =download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 262 |
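# Hypothetical programmatic use of the converter above; the paths are
# placeholders, but the keyword arguments mirror the argparse flags and the
# call made by this script.
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_controlnet_from_original_ckpt,
)

controlnet = download_controlnet_from_original_ckpt(
    checkpoint_path="./control_sd15_canny.pth",  # placeholder path
    original_config_file="./cldm_v15.yaml",      # placeholder path
    image_size=512,
    extract_ema=False,
    from_safetensors=False,
)
controlnet.save_pretrained("./controlnet-canny", safe_serialization=True)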
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: Any ) -> str:
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case , )
assert hasattr(self , """env""" )
def lowerCAmelCase_ ( self: int , snake_case: Dict ) -> List[Any]:
# configuration for running training on smdistributed Model Parallel
snake_case_ :Tuple = {
"""enabled""": True,
"""processes_per_host""": 8,
}
snake_case_ :List[Any] = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
snake_case_ :Tuple = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
snake_case_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case , py_version="""py36""" , )
def lowerCAmelCase_ ( self: Any , snake_case: Tuple ) -> List[str]:
TrainingJobAnalytics(snake_case ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowerCAmelCase_ ( self: Dict , snake_case: Dict ) -> List[Any]:
# create estimator
snake_case_ :List[Any] = self.create_estimator(snake_case )
# run training
estimator.fit()
# result dataframe
snake_case_ :Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
snake_case_ :Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
snake_case_ :Dict = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
snake_case_ :int = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case )
| 66 | 0 |
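# A quick sanity check of the model-parallel geometry configured in the test
# above: with 8 processes per host and 4 pipeline partitions, each host holds
# 8 / 4 = 2 model replicas, which is what `ddp: True` data-parallelizes over.
# The numbers mirror the smp/mpi options in the test; the check itself is
# illustrative only.
processes_per_host, partitions = 8, 4
replicas_per_host = processes_per_host // partitions
assert replicas_per_host == 2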
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
random.seed(_lowercase )
np.random.seed(_lowercase )
torch.manual_seed(_lowercase )
torch.cuda.manual_seed_all(_lowercase )
# ^^ safe to call this function even if cuda is not available
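# A self-contained sketch of the seeding pattern implemented above: fixing
# every RNG source makes subsequent draws reproducible. `set_all_seeds` is a
# local stand-in for the obfuscated `__snake_case` helper, not part of the
# library API.
def set_all_seeds(seed: int) -> None:
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

set_all_seeds(0)
first_draw = torch.randn(3)
set_all_seeds(0)
assert torch.equal(first_draw, torch.randn(3))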
class _a :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : Iterable[torch.nn.Parameter] , UpperCAmelCase : float = 0.9_999 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : int = 0 , UpperCAmelCase : bool = False , UpperCAmelCase : Union[float, int] = 1.0 , UpperCAmelCase : Union[float, int] = 2 / 3 , UpperCAmelCase : Optional[Any] = None , UpperCAmelCase : Dict[str, Any] = None , **UpperCAmelCase : Union[str, Any] , ):
if isinstance(UpperCAmelCase , torch.nn.Module ):
A_ = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , UpperCAmelCase , standard_warn=UpperCAmelCase , )
A_ = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
A_ = True
if kwargs.get("max_value" , UpperCAmelCase ) is not None:
A_ = """The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("max_value" , "1.0.0" , UpperCAmelCase , standard_warn=UpperCAmelCase )
A_ = kwargs["""max_value"""]
if kwargs.get("min_value" , UpperCAmelCase ) is not None:
A_ = """The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("min_value" , "1.0.0" , UpperCAmelCase , standard_warn=UpperCAmelCase )
A_ = kwargs["""min_value"""]
A_ = list(UpperCAmelCase )
A_ = [p.clone().detach() for p in parameters]
if kwargs.get("device" , UpperCAmelCase ) is not None:
A_ = """The `device` argument is deprecated. Please use `to` instead."""
deprecate("device" , "1.0.0" , UpperCAmelCase , standard_warn=UpperCAmelCase )
self.to(device=kwargs["device"] )
A_ = None
A_ = decay
A_ = min_decay
A_ = update_after_step
A_ = use_ema_warmup
A_ = inv_gamma
A_ = power
A_ = 0
A_ = None # set in `step()`
A_ = model_cls
A_ = model_config
@classmethod
def __A ( cls : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict ):
A_ = model_cls.load_config(UpperCAmelCase , return_unused_kwargs=UpperCAmelCase )
A_ = model_cls.from_pretrained(UpperCAmelCase )
A_ = cls(model.parameters() , model_cls=UpperCAmelCase , model_config=model.config )
ema_model.load_state_dict(UpperCAmelCase )
return ema_model
def __A ( self : Optional[Any] , UpperCAmelCase : List[Any] ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
A_ = self.model_cls.from_config(self.model_config )
A_ = self.state_dict()
state_dict.pop("shadow_params" , UpperCAmelCase )
model.register_to_config(**UpperCAmelCase )
self.copy_to(model.parameters() )
model.save_pretrained(UpperCAmelCase )
def __A ( self : Dict , UpperCAmelCase : int ):
A_ = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
A_ = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
A_ = (1 + step) / (10 + step)
A_ = min(UpperCAmelCase , self.decay )
# make sure decay is not smaller than min_decay
A_ = max(UpperCAmelCase , self.min_decay )
return cur_decay_value
@torch.no_grad()
def __A ( self : int , UpperCAmelCase : Iterable[torch.nn.Parameter] ):
if isinstance(UpperCAmelCase , torch.nn.Module ):
A_ = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , UpperCAmelCase , standard_warn=UpperCAmelCase , )
A_ = parameters.parameters()
A_ = list(UpperCAmelCase )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
A_ = self.get_decay(self.optimization_step )
A_ = decay
A_ = 1 - decay
A_ = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , UpperCAmelCase ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
A_ = deepspeed.zero.GatheredParameters(UpperCAmelCase , modifier_rank=UpperCAmelCase )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(UpperCAmelCase )
def __A ( self : int , UpperCAmelCase : Iterable[torch.nn.Parameter] ):
A_ = list(UpperCAmelCase )
for s_param, param in zip(self.shadow_params , UpperCAmelCase ):
param.data.copy_(s_param.to(param.device ).data )
def __A ( self : str , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[str]=None ):
A_ = [
p.to(device=UpperCAmelCase , dtype=UpperCAmelCase ) if p.is_floating_point() else p.to(device=UpperCAmelCase )
for p in self.shadow_params
]
def __A ( self : Union[str, Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __A ( self : List[Any] , UpperCAmelCase : Iterable[torch.nn.Parameter] ):
A_ = [param.detach().cpu().clone() for param in parameters]
def __A ( self : int , UpperCAmelCase : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , UpperCAmelCase ):
param.data.copy_(c_param.data )
# Better memory-wise.
A_ = None
def __A ( self : Tuple , UpperCAmelCase : dict ):
A_ = copy.deepcopy(UpperCAmelCase )
A_ = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
A_ = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , UpperCAmelCase ):
raise ValueError("Invalid min_decay" )
A_ = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , UpperCAmelCase ):
raise ValueError("Invalid optimization_step" )
A_ = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , UpperCAmelCase ):
raise ValueError("Invalid update_after_step" )
A_ = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , UpperCAmelCase ):
raise ValueError("Invalid use_ema_warmup" )
A_ = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
A_ = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
A_ = state_dict.get("shadow_params" , UpperCAmelCase )
if shadow_params is not None:
A_ = shadow_params
if not isinstance(self.shadow_params , UpperCAmelCase ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(UpperCAmelCase , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 312 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Dict , snake_case: Optional[Any] , snake_case: Tuple=13 , snake_case: Any=32 , snake_case: Union[str, Any]=2 , snake_case: Tuple=3 , snake_case: Union[str, Any]=16 , snake_case: Union[str, Any]=[1, 2, 1] , snake_case: Optional[Any]=[2, 2, 4] , snake_case: str=2 , snake_case: List[str]=2.0 , snake_case: Optional[int]=True , snake_case: Union[str, Any]=0.0 , snake_case: Optional[int]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[str]="gelu" , snake_case: Any=False , snake_case: Optional[Any]=True , snake_case: Optional[int]=0.0_2 , snake_case: Any=1E-5 , snake_case: Optional[int]=True , snake_case: int=None , snake_case: Any=True , snake_case: str=10 , snake_case: Optional[Any]=8 , snake_case: Union[str, Any]=["stage1", "stage2", "stage3"] , snake_case: Tuple=[1, 2, 3] , ) -> Dict:
snake_case_ :Dict = parent
snake_case_ :List[Any] = batch_size
snake_case_ :Dict = image_size
snake_case_ :Dict = patch_size
snake_case_ :Tuple = num_channels
snake_case_ :List[Any] = embed_dim
snake_case_ :List[str] = depths
snake_case_ :str = num_heads
snake_case_ :Tuple = window_size
snake_case_ :Tuple = mlp_ratio
snake_case_ :int = qkv_bias
snake_case_ :Tuple = hidden_dropout_prob
snake_case_ :Optional[Any] = attention_probs_dropout_prob
snake_case_ :Dict = drop_path_rate
snake_case_ :Any = hidden_act
snake_case_ :Any = use_absolute_embeddings
snake_case_ :int = patch_norm
snake_case_ :List[Any] = layer_norm_eps
snake_case_ :Tuple = initializer_range
snake_case_ :str = is_training
snake_case_ :int = scope
snake_case_ :Tuple = use_labels
snake_case_ :Tuple = type_sequence_label_size
snake_case_ :str = encoder_stride
snake_case_ :List[Any] = out_features
snake_case_ :str = out_indices
def lowerCAmelCase_ ( self: Tuple ) -> Dict:
snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ :str = None
if self.use_labels:
snake_case_ :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ :Union[str, Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self: int ) -> Optional[Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowerCAmelCase_ ( self: List[Any] , snake_case: str , snake_case: int , snake_case: List[str] ) -> Any:
snake_case_ :Dict = MaskFormerSwinModel(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Tuple = model(snake_case )
snake_case_ :Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case_ :Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase_ ( self: Optional[Any] , snake_case: int , snake_case: List[str] , snake_case: Tuple ) -> Union[str, Any]:
snake_case_ :Any = MaskFormerSwinBackbone(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Optional[Any] = model(snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(snake_case ):
snake_case_ :Optional[Any] = ["""stem"""]
snake_case_ :str = MaskFormerSwinBackbone(config=snake_case )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]:
snake_case_ :Optional[int] = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_ :str = config_and_inputs
snake_case_ :Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : Union[str, Any] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_A : str = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
_A : List[str] = False
_A : Any = False
_A : Dict = False
_A : List[Any] = False
_A : Optional[int] = False
def lowerCAmelCase_ ( self: Dict ) -> Any:
snake_case_ :str = MaskFormerSwinModelTester(self )
snake_case_ :Optional[Any] = ConfigTester(self , config_class=snake_case , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]:
pass
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self: Any ) -> Tuple:
return
def lowerCAmelCase_ ( self: Any ) -> Any:
snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> int:
snake_case_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case )
@unittest.skip("""Swin does not use inputs_embeds""" )
def lowerCAmelCase_ ( self: str ) -> List[str]:
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
pass
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :str = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ :Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def lowerCAmelCase_ ( self: Tuple ) -> Dict:
snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :Optional[int] = model_class(snake_case )
snake_case_ :str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ :str = [*signature.parameters.keys()]
snake_case_ :str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]:
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def lowerCAmelCase_ ( self: Dict ) -> List[Any]:
pass
def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Any , snake_case: List[str] ) -> str:
snake_case_ :List[str] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :List[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :Any = outputs.hidden_states
snake_case_ :Optional[int] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case ) , snake_case )
# Swin has a different seq_length
snake_case_ :str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ :int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]:
snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case_ :Tuple = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :List[Any] = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple:
snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :List[Any] = 3
snake_case_ :List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case_ :Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case_ :List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case_ :str = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :Any = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowerCAmelCase_ ( self: List[str] ) -> str:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowerCAmelCase_ ( self: str ) -> List[Any]:
pass
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
snake_case_, snake_case_ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(snake_case: str ):
snake_case_ :Optional[int] = 0
return t
def check_equivalence(snake_case: List[Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Tuple={} ):
with torch.no_grad():
snake_case_ :List[Any] = model(**snake_case , return_dict=snake_case , **snake_case )
snake_case_ :Any = model(**snake_case , return_dict=snake_case , **snake_case ).to_tuple()
def recursive_check(snake_case: List[Any] , snake_case: int ):
if isinstance(snake_case , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(snake_case , snake_case ):
recursive_check(snake_case , snake_case )
elif isinstance(snake_case , snake_case ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(snake_case , snake_case )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(snake_case ) , set_nan_tensor_to_zero(snake_case ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}. Dict has"""
f""" `nan`: {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}."""
) , )
recursive_check(snake_case , snake_case )
for model_class in self.all_model_classes:
snake_case_ :int = model_class(snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Any = self._prepare_for_class(snake_case , snake_case )
snake_case_ :List[Any] = self._prepare_for_class(snake_case , snake_case )
check_equivalence(snake_case , snake_case , snake_case )
snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
check_equivalence(snake_case , snake_case , snake_case )
snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case )
snake_case_ :Any = self._prepare_for_class(snake_case , snake_case )
check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} )
snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
snake_case_ :List[str] = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} )
@require_torch
class lowerCamelCase ( unittest.TestCase , _lowerCAmelCase ):
'''simple docstring'''
_A : int = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_A : Tuple = MaskFormerSwinConfig
def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]:
snake_case_ :Optional[Any] = MaskFormerSwinModelTester(self )
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Tuple = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
snake_case_ :List[str] = backbone_class(snake_case )
backbone.to(snake_case )
backbone.eval()
snake_case_ :List[Any] = backbone(**snake_case )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , snake_case )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
snake_case_ :Union[str, Any] = backbone(**snake_case , output_hidden_states=snake_case )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
snake_case_, snake_case_, snake_case_ :List[Any] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
snake_case_ :List[Any] = backbone(**snake_case , output_attentions=snake_case )
self.assertIsNotNone(outputs.attentions )
| 66 | 0 |
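# A quick numeric check of the Swin shape bookkeeping the tests above rely on:
# patch embedding yields (H/ps) * (W/ps) tokens, and each of the
# len(depths) - 1 patch-merging steps quarters the token count while doubling
# the channel width. Values mirror the tester defaults (32px images, patch
# size 2, 16-dim embeddings, depths [1, 2, 1]).
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
num_patches = (image_size // patch_size) ** 2            # 256 tokens
final_seq_len = num_patches // (4 ** (len(depths) - 1))  # 16 tokens
final_dim = int(embed_dim * 2 ** (len(depths) - 1))      # 64 channels
assert (final_seq_len, final_dim) == (16, 64)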
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = """▁"""
lowerCamelCase__ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
lowerCamelCase__ = {
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
lowerCamelCase__ = {
"""facebook/s2t-small-librispeech-asr""": 1024,
}
lowerCamelCase__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
lowerCamelCase__ = {"""mustc""": MUSTC_LANGS}
class A__ ( _lowerCAmelCase ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = MAX_MODEL_INPUT_SIZES
lowercase = ["""input_ids""", """attention_mask"""]
lowercase = []
def __init__( self : Dict , a : List[str] , a : Tuple , a : List[Any]="<s>" , a : List[Any]="</s>" , a : Optional[int]="<pad>" , a : Any="<unk>" , a : Tuple=False , a : List[Any]=False , a : int=None , a : Optional[Any]=None , a : Optional[Dict[str, Any]] = None , **a : Tuple , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , unk_token=a , pad_token=a , do_upper_case=a , do_lower_case=a , tgt_lang=a , lang_codes=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
lowerCAmelCase__ : Union[str, Any] = do_upper_case
lowerCAmelCase__ : int = do_lower_case
lowerCAmelCase__ : List[str] = load_json(a )
lowerCAmelCase__ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
lowerCAmelCase__ : Optional[int] = spm_file
lowerCAmelCase__ : List[str] = load_spm(a , self.sp_model_kwargs )
if lang_codes is not None:
lowerCAmelCase__ : Tuple = lang_codes
lowerCAmelCase__ : List[Any] = LANGUAGES[lang_codes]
lowerCAmelCase__ : Union[str, Any] = [f'''<lang:{lang}>''' for lang in self.langs]
lowerCAmelCase__ : str = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''' ) for lang in self.langs}
lowerCAmelCase__ : Optional[int] = self.lang_tokens
lowerCAmelCase__ : Dict = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
lowerCAmelCase__ : int = {}
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return len(self.encoder )
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return self._tgt_lang
@tgt_lang.setter
def _lowerCamelCase ( self : str , a : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = new_tgt_lang
self.set_tgt_lang_special_tokens(a )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : str = self.lang_code_to_id[tgt_lang]
lowerCAmelCase__ : List[Any] = [lang_code_id]
def _lowerCamelCase ( self : int , a : str ):
'''simple docstring'''
return self.sp_model.encode(a , out_type=a )
def _lowerCamelCase ( self : Optional[Any] , a : Optional[int] ):
'''simple docstring'''
return self.encoder.get(a , self.encoder[self.unk_token] )
def _lowerCamelCase ( self : Union[str, Any] , a : int ):
'''simple docstring'''
return self.decoder.get(a , self.unk_token )
def _lowerCamelCase ( self : Dict , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = []
lowerCAmelCase__ : Union[str, Any] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
lowerCAmelCase__ : Any = self.sp_model.decode(a )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
lowerCAmelCase__ : List[str] = []
else:
current_sub_tokens.append(a )
lowerCAmelCase__ : Any = self.sp_model.decode(a )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def _lowerCamelCase ( self : Union[str, Any] , a : str , a : Any=None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self : Tuple , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
lowerCAmelCase__ : Union[str, Any] = [1] * len(self.prefix_tokens )
lowerCAmelCase__ : Any = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(a )) + suffix_ones
return prefix_ones + ([0] * len(a )) + ([0] * len(a )) + suffix_ones
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.__dict__.copy()
lowerCAmelCase__ : List[Any] = None
return state
def __setstate__( self : Union[str, Any] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase__ : int = {}
lowerCAmelCase__ : Optional[Any] = load_spm(self.spm_file , self.sp_model_kwargs )
def _lowerCamelCase ( self : Union[str, Any] , a : str , a : Optional[str] = None ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = Path(a )
assert save_dir.is_dir(), f'''{save_directory} should be a directory'''
lowerCAmelCase__ : int = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
lowerCAmelCase__ : Union[str, Any] = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , a )
if os.path.abspath(self.spm_file ) != os.path.abspath(a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , a )
elif not os.path.isfile(self.spm_file ):
with open(a , 'wb' ) as fi:
lowerCAmelCase__ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(a )
return (str(a ), str(a ))
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
lowerCAmelCase__ : Any = sentencepiece.SentencePieceProcessor(**_lowercase )
spm.Load(str(_lowercase ) )
return spm
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
with open(_lowercase , 'r' ) as f:
return json.load(_lowercase )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
with open(_lowercase , 'w' ) as f:
json.dump(_lowercase , _lowercase , indent=2 ) | 212 |
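# A small illustration of the special-tokens mask convention built above (the
# `prefix_ones` / `suffix_ones` logic): the prefix (the target-language code
# token) is marked 1, the sequence body 0, and the trailing EOS 1. The ids
# here are made up for demonstration.
prefix_tokens = [9]    # e.g. the id of a <lang:fr> token
token_ids = [5, 6, 7]  # an encoded sentence body
mask = [1] * len(prefix_tokens) + [0] * len(token_ids) + [1]
assert mask == [1, 0, 0, 0, 1]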
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__a = logging.get_logger(__name__)
enable_full_determinism()
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = UNetaDModel
_A : Union[str, Any] = """sample"""
@property
def lowerCAmelCase_ ( self: str ) -> Tuple:
snake_case_ :List[str] = 4
snake_case_ :Tuple = 3
snake_case_ :Optional[Any] = (32, 32)
snake_case_ :str = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :Union[str, Any] = torch.tensor([10] ).to(snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self: List[str] ) -> Dict:
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
return (3, 32, 32)
def lowerCAmelCase_ ( self: Optional[int] ) -> Dict:
snake_case_ :Any = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
snake_case_ :Tuple = self.dummy_input
return init_dict, inputs_dict
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = UNetaDModel
_A : Union[str, Any] = """sample"""
@property
def lowerCAmelCase_ ( self: str ) -> str:
snake_case_ :List[str] = 4
snake_case_ :Optional[int] = 4
snake_case_ :int = (32, 32)
snake_case_ :Any = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :List[Any] = torch.tensor([10] ).to(snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
return (4, 32, 32)
@property
def lowerCAmelCase_ ( self: List[Any] ) -> int:
return (4, 32, 32)
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]:
snake_case_ :Dict = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
snake_case_ :List[str] = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]:
snake_case_, snake_case_ :List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(snake_case )
snake_case_ :List[str] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self: Tuple ) -> Dict:
snake_case_, snake_case_ :Union[str, Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case )
model.to(snake_case )
snake_case_ :Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self: str ) -> Any:
# by default, model loading will use accelerate since `low_cpu_mem_usage=True`
snake_case_, snake_case_ :List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case )
model_accelerate.to(snake_case )
model_accelerate.eval()
snake_case_ :List[Any] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ :int = noise.to(snake_case )
snake_case_ :str = torch.tensor([10] * noise.shape[0] ).to(snake_case )
snake_case_ :Optional[int] = model_accelerate(snake_case , snake_case )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
snake_case_, snake_case_ :str = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case , low_cpu_mem_usage=snake_case )
model_normal_load.to(snake_case )
model_normal_load.eval()
snake_case_ :int = model_normal_load(snake_case , snake_case )["""sample"""]
assert torch_all_close(snake_case , snake_case , rtol=1E-3 )
def lowerCAmelCase_ ( self: Tuple ) -> Any:
snake_case_ :Tuple = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(snake_case )
snake_case_ :Optional[int] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ :int = noise.to(snake_case )
snake_case_ :List[Any] = torch.tensor([10] * noise.shape[0] ).to(snake_case )
with torch.no_grad():
snake_case_ :Union[str, Any] = model(snake_case , snake_case ).sample
snake_case_ :Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case_ :Dict = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-3 ) )
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = UNetaDModel
_A : List[Any] = """sample"""
@property
def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: int=(32, 32) ) -> Tuple:
snake_case_ :Union[str, Any] = 4
snake_case_ :Any = 3
snake_case_ :int = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any:
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self: int ) -> Tuple:
return (3, 32, 32)
def lowerCAmelCase_ ( self: List[str] ) -> Tuple:
snake_case_ :List[Any] = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1E-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
snake_case_ :int = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]:
snake_case_, snake_case_ :List[Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(snake_case )
snake_case_ :Any = self.dummy_input
snake_case_ :int = floats_tensor((4, 3) + (256, 256) ).to(snake_case )
snake_case_ :int = noise
snake_case_ :int = model(**snake_case )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase_ ( self: str ) -> Dict:
snake_case_ :Dict = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(snake_case )
snake_case_ :List[str] = 4
snake_case_ :Optional[int] = 3
snake_case_ :List[str] = (256, 256)
snake_case_ :Tuple = torch.ones((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :str = torch.tensor(batch_size * [1E-4] ).to(snake_case )
with torch.no_grad():
snake_case_ :Dict = model(snake_case , snake_case ).sample
snake_case_ :int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ :Optional[Any] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) )
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
snake_case_ :Optional[Any] = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(snake_case )
snake_case_ :Optional[int] = 4
snake_case_ :Optional[Any] = 3
snake_case_ :Optional[Any] = (32, 32)
snake_case_ :Dict = torch.ones((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :Any = torch.tensor(batch_size * [1E-4] ).to(snake_case )
with torch.no_grad():
snake_case_ :str = model(snake_case , snake_case ).sample
snake_case_ :int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ :int = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) )
def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]:
# not required for this model
pass
| 66 | 0 |
import math
def lowerCamelCase__ ( A__ : List[str] , A__ : Any ):
'''simple docstring'''
if 0 not in (x, y):
# We compare via logarithms, using the relation log10(x^y) = y * log10(x).
return y * math.logaa(_lowercase )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("""This should never happen""" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
prompt = 'Enter the base and the power separated by a comma: '
xa, ya = map(int, input(prompt).split(','))
xb, yb = map(int, input(prompt).split(','))
# We find the log of each number, using the function res(), which takes two
# arguments.
resa = res(xa, ya)
resb = res(xb, yb)
# We check for the largest number
if resa > resb:
    print('Largest number is', xa, '^', ya)
elif resb > resa:
    print('Largest number is', xb, '^', yb)
else:
    print('Both are equal')
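# Worked example of the log trick above: compare 2**100 with 10**30 without
# materializing either power. Since log10(2**100) = 100 * log10(2) ≈ 30.10 is
# greater than log10(10**30) = 30, 2**100 is the larger number.
import math
assert 100 * math.log10(2) > 30 * math.log10(10)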
| 12 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__a = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure)
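# A minimal sketch of the lazy-import pattern used above: attributes are
# resolved on first access instead of at import time, so heavy backends
# (torch, vision) are only imported when actually needed. This is a
# simplified stand-in for transformers' `_LazyModule`, not its real
# implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)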
| 66 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
a__ : Any = logging.getLogger(__name__)
class a_ ( _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = """summarization"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = ["""loss"""]
__SCREAMING_SNAKE_CASE : Dict = ROUGE_KEYS
__SCREAMING_SNAKE_CASE : Tuple = """rouge2"""
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)
        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase="val" ) ->Dict:
self.step_count += 1
SCREAMING_SNAKE_CASE : int = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE : List[Any] = losses["""loss"""]
SCREAMING_SNAKE_CASE : Tuple = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
SCREAMING_SNAKE_CASE : Optional[Any] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE : torch.FloatTensor = torch.tensor(_lowerCamelCase ).type_as(_lowerCamelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()}
SCREAMING_SNAKE_CASE : Optional[int] = self.step_count
self.metrics[prefix].append(_lowerCamelCase ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE : Any = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F"""{prefix}_loss""": loss,
F"""{prefix}_{self.val_metric}""": metric_tensor,
}
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )
    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="Task name, e.g. summarization or translation."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will affect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    """simple docstring"""

    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    """simple docstring"""
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
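# Hedged usage sketch (not part of the original script; paths are illustrative,
# and `--model_name_or_path`/`--do_train` are assumed to come from the generic
# and BaseTransformer argument groups added above):
#
#     python finetune.py --model_name_or_path t5-small --data_dir ./cnn_dm \
#         --output_dir ./out --do_train --gpus 1 --task summarization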
| 313 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
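# Hedged usage sketch mirroring the slow tests above (requires a CUDA build of
# torch; the model id and scale values are taken from the tests, the prompt is
# illustrative):
#
#     pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
#     image = pipe("a photo of an astronaut", sag_scale=1.0, guidance_scale=7.5).images[0]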
| 66 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
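# Hedged usage sketch (not part of the original file; the tokenizer id comes
# from the pretrained map above, the sentence is illustrative):
#
#     tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#     ids = tok("PEGASUS uses gap-sentence pre-training.").input_ids  # ends with eos_token_id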
| 287 |
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    '''simple docstring'''

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int):
    '''simple docstring'''
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
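# Hedged worked example (the transition table is illustrative, not from the
# original file): simulate 1000 steps of a two-state chain starting at "a".
#
#     transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#     get_transitions("a", transitions, 1000)  # Counter of visits per node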
| 66 | 0 |
def __magic_name__ ( __a : Dict ):
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(_lowercase ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 244 |
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__a = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__a = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__a = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
__a = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier):
    '''simple docstring'''
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
    '''simple docstring'''
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    '''simple docstring'''
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    '''simple docstring'''
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))
        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"
        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    '''simple docstring'''
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(model)
    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
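# Hedged usage sketch (flags are the ones defined above; run from the repo root
# as the header comment says, with a token that can write to the
# `huggingface/transformers-metadata` dataset):
#
#     python utils/update_metadata.py --check-only
#     python utils/update_metadata.py --token <hf_token> --commit_sha <sha>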
| 66 | 0 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""

    model_input_names = ["input_values", "attention_mask"]
    def __init__(
        self,
        feature_size=1,
        sampling_rate=16000,
        padding_value=0.0,
        do_normalize=False,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        frame_signal_scale=1.0,
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        reduction_factor=2,
        return_attention_mask=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs
            )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs
            )
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def _process_audio(
        self,
        speech,
        is_target=False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
| 81 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    '''simple docstring'''

    mode = "token-classification"
    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )
    def validation_step(self, batch, batch_nb):
        """Compute validation"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
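# Hedged usage sketch (paths are illustrative; `--task_type`, `--labels` and
# `--max_seq_length` are defined above, the rest are assumed to come from the
# generic and BaseTransformer argument groups):
#
#     python run_ner.py --data_dir ./conll2003 --labels ./labels.txt \
#         --model_name_or_path bert-base-cased --output_dir ./out --do_train --do_predict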
| 66 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    '''simple docstring'''

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int):
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276 |
"""simple docstring"""
from math import factorial
class Dual:
    '''simple docstring'''

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("""power must be a positive integer""")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    '''simple docstring'''
    if not callable(func):
        raise ValueError("""differentiate() requires a function as input for func""")
    if not isinstance(position, (float, int)):
        raise ValueError("""differentiate() requires a float as input for position""")
    if not isinstance(order, int):
        raise ValueError("""differentiate() requires an int as input for order""")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()


def f(y):
    '''simple docstring'''
    return y**2 * y**4


print(differentiate(f, 9, 2))
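# Illustrative checks (not from the original module): the dual components track
# Taylor coefficients, so duals[k-1] * k! recovers the k-th derivative.
def _demo_differentiate() -> None:
    print(differentiate(lambda x: x**2, 2, 2))  # d^2/dx^2 x^2 = 2
    print(differentiate(lambda x: x**6, 9, 2))  # 30 * 9**4 = 196830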
| 66 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *lowerCAmelCase__ : int , **lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : str = ["""sentencepiece"""]
def __init__( self : List[Any] , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : List[Any] ) -> Any:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : int = ["""sentencepiece"""]
def __init__( self : List[str] , *lowerCAmelCase__ : Dict , **lowerCAmelCase__ : int ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Any = ["""sentencepiece"""]
def __init__( self : int , *lowerCAmelCase__ : int , **lowerCAmelCase__ : List[str] ) -> str:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = ["""sentencepiece"""]
def __init__( self : List[str] , *lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Any = ["""sentencepiece"""]
def __init__( self : Dict , *lowerCAmelCase__ : str , **lowerCAmelCase__ : Dict ) -> Any:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = ["""sentencepiece"""]
def __init__( self : Tuple , *lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : Tuple ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : str = ["""sentencepiece"""]
def __init__( self : List[Any] , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : int ) -> str:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase__ : str , **lowerCAmelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : int = ["""sentencepiece"""]
def __init__( self : List[Any] , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : int ) -> List[str]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : str = ["""sentencepiece"""]
def __init__( self : List[str] , *lowerCAmelCase__ : int , **lowerCAmelCase__ : Tuple ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = ["""sentencepiece"""]
def __init__( self : str , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : List[str] = ["""sentencepiece"""]
def __init__( self : str , *lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : int ) -> str:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = ["""sentencepiece"""]
def __init__( self : Dict , *lowerCAmelCase__ : int , **lowerCAmelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : List[Any] , *lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = ["""sentencepiece"""]
def __init__( self : Optional[int] , *lowerCAmelCase__ : str , **lowerCAmelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = ["""sentencepiece"""]
def __init__( self : Tuple , *lowerCAmelCase__ : str , **lowerCAmelCase__ : Tuple ) -> int:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : str = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Any = ["""sentencepiece"""]
def __init__( self : Tuple , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Any ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Any , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : List[str] ) -> str:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : int = ["""sentencepiece"""]
def __init__( self : str , *lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : Any ) -> int:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = ["""sentencepiece"""]
def __init__( self : Optional[int] , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Optional[int] , *lowerCAmelCase__ : str , **lowerCAmelCase__ : Dict ) -> str:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase__ : int , **lowerCAmelCase__ : List[Any] ) -> List[str]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Any = ["""sentencepiece"""]
def __init__( self : List[Any] , *lowerCAmelCase__ : str , **lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : str = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = ["""sentencepiece"""]
def __init__( self : List[str] , *lowerCAmelCase__ : str , **lowerCAmelCase__ : Any ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : int = ["""sentencepiece"""]
def __init__( self : Tuple , *lowerCAmelCase__ : Any , **lowerCAmelCase__ : Optional[Any] ) -> Dict:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class A__ ( metaclass=_lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : List[str] = ["""sentencepiece"""]
def __init__( self : str , *lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : Optional[int] ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["sentencepiece"] ) | 145 |
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints):
    '''simple docstring'''
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
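# Usage sketch (illustrative): radix_sort sorts in place and returns the same
# list; inputs are assumed to be non-negative integers, since the digit
# extraction above relies on that.
def _demo_radix_sort() -> None:
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))
    # [2, 24, 45, 66, 75, 90, 170, 802]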
| 66 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
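# Hedged illustration: _LazyModule defers the heavy submodule imports declared
# above until an attribute is first accessed. A minimal standalone sketch of the
# same idea; the names here are assumptions, not the transformers implementation.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # import the defining submodule lazily, then pull the attribute from it
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)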
| 260 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
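# Hedged note: the try/except blocks above gate optional dependencies. The
# is_*_available() helpers boil down to a find_spec probe, roughly:
import importlib.util


def _is_package_available(package_name: str) -> bool:
    # True when the package can be imported in this environment
    return importlib.util.find_spec(package_name) is not None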
| 66 | 0 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    '''simple docstring'''
    _keys_to_ignore_on_load_unexpected = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self , __lowercase , __lowercase , __lowercase = None , __lowercase = 5_0_2_5_7 , __lowercase = 1_0_2_4 , __lowercase = 7_6_8 , __lowercase = 1_2 , __lowercase = 1_2 , __lowercase = None , __lowercase = "gelu_new" , __lowercase = 0.1 , __lowercase = 0.1 , __lowercase = 0.1 , __lowercase = 1e-5 , __lowercase = 0.02 , __lowercase = True , __lowercase = True , __lowercase = False , __lowercase = False , ) -> Tuple:
super().__init__()
lowerCAmelCase_ : Tuple = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
lowerCAmelCase_ : Union[str, Any] = prefix_inner_dim
lowerCAmelCase_ : Optional[Any] = prefix_hidden_dim
lowerCAmelCase_ : Dict = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCAmelCase_ : str = (
nn.Linear(self.prefix_hidden_dim , __lowercase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCAmelCase_ : Any = GPTaConfig(
vocab_size=__lowercase , n_positions=__lowercase , n_embd=__lowercase , n_layer=__lowercase , n_head=__lowercase , n_inner=__lowercase , activation_function=__lowercase , resid_pdrop=__lowercase , embd_pdrop=__lowercase , attn_pdrop=__lowercase , layer_norm_epsilon=__lowercase , initializer_range=__lowercase , scale_attn_weights=__lowercase , use_cache=__lowercase , scale_attn_by_inverse_layer_idx=__lowercase , reorder_and_upcast_attn=__lowercase , )
lowerCAmelCase_ : Dict = GPTaLMHeadModel(__lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , __lowercase = None , ) -> Union[str, Any]:
lowerCAmelCase_ : Tuple = self.transformer.transformer.wte(__lowercase )
lowerCAmelCase_ : str = self.encode_prefix(__lowercase )
lowerCAmelCase_ : List[Any] = self.decode_prefix(__lowercase )
lowerCAmelCase_ : Dict = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowerCAmelCase_ : Tuple = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowerCAmelCase_ : Union[str, Any] = torch.cat((dummy_token, input_ids) , dim=1 )
lowerCAmelCase_ : Tuple = self.transformer(inputs_embeds=__lowercase , labels=__lowercase , attention_mask=__lowercase )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowercase_ ( self , __lowercase , __lowercase ) -> torch.Tensor:
return torch.zeros(__lowercase , self.prefix_length , dtype=torch.intaa , device=__lowercase )
def lowercase_ ( self , __lowercase ) -> List[Any]:
return self.encode_prefix(__lowercase )
@torch.no_grad()
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Dict:
lowerCAmelCase_ : List[Any] = torch.split(__lowercase , 1 , dim=0 )
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : str = []
for feature in features:
lowerCAmelCase_ : Tuple = self.decode_prefix(feature.to(__lowercase ) ) # back to the clip feature
# Only support beam search for now
lowerCAmelCase_ : Union[str, Any] = self.generate_beam(
input_embeds=__lowercase , device=__lowercase , eos_token_id=__lowercase )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCAmelCase_ : Optional[int] = torch.stack(__lowercase )
lowerCAmelCase_ : Tuple = torch.stack(__lowercase )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowercase_ ( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase = 5 , __lowercase = 6_7 , __lowercase = 1.0 , __lowercase = None , ) -> Tuple:
lowerCAmelCase_ : int = eos_token_id
lowerCAmelCase_ : Tuple = None
lowerCAmelCase_ : Union[str, Any] = None
lowerCAmelCase_ : int = torch.ones(__lowercase , device=__lowercase , dtype=torch.int )
lowerCAmelCase_ : List[Any] = torch.zeros(__lowercase , device=__lowercase , dtype=torch.bool )
if input_embeds is not None:
lowerCAmelCase_ : str = input_embeds
else:
lowerCAmelCase_ : Optional[int] = self.transformer.transformer.wte(__lowercase )
for i in range(__lowercase ):
lowerCAmelCase_ : str = self.transformer(inputs_embeds=__lowercase )
lowerCAmelCase_ : int = outputs.logits
lowerCAmelCase_ : Tuple = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCAmelCase_ : List[str] = logits.softmax(-1 ).log()
if scores is None:
lowerCAmelCase_ : Optional[int] = logits.topk(__lowercase , -1 )
lowerCAmelCase_ : Union[str, Any] = generated.expand(__lowercase , *generated.shape[1:] )
lowerCAmelCase_ : Optional[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCAmelCase_ : str = next_tokens
else:
lowerCAmelCase_ : Any = tokens.expand(__lowercase , *tokens.shape[1:] )
lowerCAmelCase_ : Any = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCAmelCase_ : Union[str, Any] = -float(np.inf )
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Any = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCAmelCase_ : Any = scores_sum / seq_lengths[:, None]
lowerCAmelCase_ : str = scores_sum_average.view(-1 ).topk(__lowercase , -1 )
lowerCAmelCase_ : List[str] = next_tokens // scores_sum.shape[1]
lowerCAmelCase_ : Optional[Any] = seq_lengths[next_tokens_source]
lowerCAmelCase_ : List[str] = next_tokens % scores_sum.shape[1]
lowerCAmelCase_ : Union[str, Any] = next_tokens.unsqueeze(1 )
lowerCAmelCase_ : Optional[int] = tokens[next_tokens_source]
lowerCAmelCase_ : Dict = torch.cat((tokens, next_tokens) , dim=1 )
lowerCAmelCase_ : List[str] = generated[next_tokens_source]
lowerCAmelCase_ : str = scores_sum_average * seq_lengths
lowerCAmelCase_ : str = is_stopped[next_tokens_source]
lowerCAmelCase_ : List[str] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCAmelCase_ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
lowerCAmelCase_ : Optional[int] = is_stopped + next_tokens.eq(__lowercase ).squeeze()
if is_stopped.all():
break
lowerCAmelCase_ : Union[str, Any] = scores / seq_lengths
lowerCAmelCase_ : List[str] = scores.argsort(descending=__lowercase )
# tokens tensors are already padded to max_seq_length
lowerCAmelCase_ : Union[str, Any] = [tokens[i] for i in order]
lowerCAmelCase_ : Optional[Any] = torch.stack(__lowercase , dim=0 )
lowerCAmelCase_ : List[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths | 262 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: List[Any] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase_ ( self: Tuple ) -> Any:
snake_case_, snake_case_ :List[str] = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-canny""" , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_, snake_case_ :Union[str, Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_ :Union[str, Any] = controlnet_params
snake_case_ :Union[str, Any] = """bird"""
snake_case_ :List[Any] = jax.device_count()
snake_case_ :List[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case_ :List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
snake_case_ :List[str] = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case_ :Any = jax.random.PRNGKey(0 )
snake_case_ :List[str] = jax.random.split(snake_case , jax.device_count() )
snake_case_ :List[Any] = replicate(snake_case )
snake_case_ :List[str] = shard(snake_case )
snake_case_ :str = shard(snake_case )
snake_case_ :Dict = pipe(
prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ :Union[str, Any] = images[0, 253:256, 253:256, -1]
snake_case_ :str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ :Dict = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self: int ) -> Dict:
snake_case_, snake_case_ :List[Any] = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-openpose""" , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_, snake_case_ :int = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_ :str = controlnet_params
snake_case_ :Optional[int] = """Chef in the kitchen"""
snake_case_ :Union[str, Any] = jax.device_count()
snake_case_ :Any = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case_ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
snake_case_ :Optional[Any] = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case_ :str = jax.random.PRNGKey(0 )
snake_case_ :str = jax.random.split(snake_case , jax.device_count() )
snake_case_ :Tuple = replicate(snake_case )
snake_case_ :str = shard(snake_case )
snake_case_ :int = shard(snake_case )
snake_case_ :List[str] = pipe(
prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ :int = images[0, 253:256, 253:256, -1]
snake_case_ :Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ :Optional[int] = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
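# Hedged sketch: replicate()/shard() above implement the standard flax
# data-parallel recipe -- parameters are copied to every device and batch inputs
# gain a leading device axis before being fed to a jitted/pmapped call. A
# minimal shard helper for illustration:
def _shard_for_devices(x):
    # reshape (batch, ...) -> (n_devices, batch // n_devices, ...)
    n = jax.device_count()
    return x.reshape((n, x.shape[0] // n) + x.shape[1:])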
| 66 | 0 |
def print_max_activities(start, finish):
    """simple docstring"""
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
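
# Illustrative variant (not from the original module): the same greedy idea,
# returning indices instead of printing. It sorts by finish time first, which
# print_max_activities assumes has already been done.
def _select_activities(start: list, finish: list) -> list:
    order = sorted(range(len(finish)), key=lambda k: finish[k])
    selected = [order[0]]
    for j in order[1:]:
        if start[j] >= finish[selected[-1]]:
            selected.append(j)
    return selected


# _select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) -> [0, 1, 3, 4]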
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish) | 312 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    job_info = {}

    start = job["""started_at"""]
    end = job["""completed_at"""]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}

    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + F'''&page={i + 2}''', headers=headers).json()
            job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})

        return job_time
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''')
        return {}
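
# Illustrative demo (not from the original script): how
# extract_time_from_single_job derives "duration". The timestamps below are
# made-up values for demonstration only.
def _demo_job_duration() -> None:
    job = {"started_at": "2023-01-01T10:00:00Z", "completed_at": "2023-01-01T10:41:00Z"}
    print(extract_time_from_single_job(job)["duration"])  # 41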
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(F"""{k}: {v['duration']}""")
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Union[str, Any] = os.path.join(args.tf_model_dir, """parameters.json""" )
snake_case_ :Any = json.loads(open(_lowercase ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(""".pt""" ):
snake_case_ :Optional[int] = args.output + """.pt"""
snake_case_ :List[str] = OrderedDict()
with tf.device("""/CPU:0""" ):
snake_case_ :Dict = tf.train.load_checkpoint(args.tf_model_dir )
snake_case_ :str = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
snake_case_ :List[Any] = reader.get_tensor(_lowercase ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
snake_case_ :Any = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
snake_case_ :Optional[int] = 8
snake_case_ :List[str] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :List[str] = torch.tensor(_lowercase )
elif key_name.startswith("""model/moe""" ):
snake_case_ :Tuple = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/softmlp/kernel""" ):
snake_case_ :List[Any] = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
snake_case_ :Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
snake_case_ :Dict = key_name[-9:-7]
for i in range(16 ):
snake_case_ :str = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
snake_case_ :Tuple = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("""model/mlp""" ):
snake_case_ :Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/p1/bias""" ):
snake_case_ :List[Any] = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
snake_case_ :str = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/p2/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.endswith("""/p2/bias""" ):
snake_case_ :Dict = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
snake_case_ :Any = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("""model/ln""" ):
snake_case_ :Union[str, Any] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
snake_case_ :str = """model.blocks.%d.feed_forward.norm.bias""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :int = torch.tensor(_lowercase )
elif key_name.endswith("""/g""" ):
snake_case_ :Dict = """model.blocks.%d.feed_forward.norm.weight""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.startswith("""model/att""" ):
snake_case_ :List[str] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
snake_case_ :Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
snake_case_ :Dict = state[:, 0, :, :]
snake_case_ :int = state[:, 1, :, :]
snake_case_ :List[str] = state[:, 2, :, :]
snake_case_ :str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Any = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[int] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :int = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
snake_case_ :int = torch.tensor(_lowercase )
snake_case_ :Optional[Any] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
snake_case_ :Dict = torch.tensor(_lowercase )
snake_case_ :Dict = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/o/kernel""" ):
snake_case_ :str = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
snake_case_ :str = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Any = torch.tensor(_lowercase )
elif key_name.startswith("""model/an""" ):
snake_case_ :Optional[int] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
snake_case_ :Any = """model.blocks.%d.self_attn.norm.bias""" % player
snake_case_ :Optional[int] = vnp.copy() # same because it is one dimensional
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.endswith("""/g""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.self_attn.norm.weight""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
snake_case_ :List[Any] = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
snake_case_ :Optional[Any] = """model.%s.weight""" % nlayer
snake_case_ :Any = vnp.copy() # same in embedded
snake_case_ :List[Any] = torch.tensor(_lowercase )
if key_name.startswith("""model/wte""" ):
snake_case_ :Tuple = """lm_head.weight"""
snake_case_ :List[str] = vnp.copy() # same in embedded
snake_case_ :List[Any] = torch.tensor(_lowercase )
elif key_name.startswith("""model/wob""" ):
snake_case_ :str = """final_logits_bias"""
snake_case_ :Any = vnp.copy() # same in embedded
snake_case_ :List[Any] = state.reshape((1, -1) )
snake_case_ :Union[str, Any] = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
snake_case_ :str = """model.last_project.weight"""
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :int = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
snake_case_ :Optional[int] = """model.last_project.bias"""
snake_case_ :Tuple = vnp.copy() # same because it is one dimensional
snake_case_ :Any = torch.tensor(_lowercase )
torch.save(_lowercase, args.output )
if __name__ == "__main__":
__a = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
__a = parser.parse_args()
convert_tf_gptsan_to_pt(args)
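# Hedged note: the recurring vnp.transpose([1, 0]).copy() pattern above converts
# a Mesh-TensorFlow kernel stored as (in_features, out_features) into the
# (out_features, in_features) layout that torch.nn.Linear weights use.
# Standalone illustration:
def _tf_kernel_to_linear_weight(kernel):
    # kernel: numpy array of shape (in_features, out_features)
    return torch.tensor(kernel.transpose([1, 0]).copy())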
| 66 | 0 |
'''simple docstring'''
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path) -> None:
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''') as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
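# Usage note (hedged): invoked from the command line roughly as follows; the
# script filename is an assumption, the flags match the parser above.
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir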
| 67 | '''simple docstring'''
def is_balanced(s) -> bool:
    stack = []
    open_brackets = set({'''(''', '''[''', '''{'''})
    closed_brackets = set({''')''', ''']''', '''}'''})
    open_to_closed = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input('''Enter sequence of brackets: ''')
    if is_balanced(s):
        print(s, '''is balanced''')
    else:
        print(s, '''is not balanced''')
if __name__ == "__main__":
main()
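
# Illustrative checks (not from the original module): a few inputs and the
# values is_balanced returns.
def _demo_is_balanced() -> None:
    assert is_balanced("([]{})")
    assert not is_balanced("([)]")  # interleaved pairs are rejected
    assert is_balanced("")  # an empty sequence is trivially balanced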
| 67 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""")

        requires_backends(self, '''vision''')
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['''threshold'''] = kwargs['''threshold''']
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors='''pt''')
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['''words'''], boxes=inputs['''boxes'''], return_tensors='''pt''')
        inputs['''target_size'''] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop('''target_size''')
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({'''target_size''': target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs['''bbox'''] = model_inputs['''bbox''']
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs['''target_size''']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 10_00),
                            (height * bbox[1] / 10_00),
                            (width * bbox[2] / 10_00),
                            (height * bbox[3] / 10_00),
                        ]))

            scores, classes = model_outputs['''logits'''].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs['''bbox'''].squeeze(0)]
            keys = ['''score''', '''label''', '''box''']
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['''scores''']
            labels = raw_annotation['''labels''']
            boxes = raw_annotation['''boxes''']
            raw_annotation['''scores'''] = scores.tolist()
            raw_annotation['''labels'''] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['''boxes'''] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['''score''', '''label''', '''box''']
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation['''scores'''], raw_annotation['''labels'''], raw_annotation['''boxes'''])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """simple docstring"""
        if self.framework != "pt":
            raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            '''xmin''': xmin,
            '''ymin''': ymin,
            '''xmax''': xmax,
            '''ymax''': ymax,
        }
        return bbox
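# Hedged sketch: LayoutLM-style token boxes are normalized to a 0-1000 grid;
# the unnormalize() closure in postprocess() maps them back to pixel space.
# Standalone version for clarity:
def _unnormalize_box(bbox, width, height):
    return [
        width * bbox[0] / 1000,
        height * bbox[1] / 1000,
        width * bbox[2] / 1000,
        height * bbox[3] / 1000,
    ]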
| 67 | '''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=10_24, imageDimSize=7_68, **kwargs):
        """simple docstring"""
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        """simple docstring"""
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)

    def forward(self, input_ids, attention_mask):
        """simple docstring"""
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
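# Hedged sketch: the pooling in forward() is a mask-weighted mean over token
# embeddings. Standalone version, usable with dummy tensors:
def _masked_mean_pool(embs, attention_mask):
    # embs: (batch, seq, dim); attention_mask: (batch, seq) of 0/1
    return (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]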
| 67 | 1 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def __lowerCAmelCase ( UpperCamelCase__ = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def __lowerCAmelCase ( UpperCamelCase__ = "" ) -> bool:
if len(UpperCamelCase__ ) == 0:
return True
__lowerCamelCase = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__lowerCamelCase = {}
for character in lower_case_input_str:
__lowerCamelCase = character_freq_dict.get(UpperCamelCase__ , 0 ) + 1
__lowerCamelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def __lowerCAmelCase ( UpperCamelCase__ = "" ) -> None:
print('''\nFor string = ''' , UpperCamelCase__ , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(UpperCamelCase__ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(UpperCamelCase__ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
__UpperCAmelCase =input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
__UpperCAmelCase =can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
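
# Illustrative checks (not from the original module): a string can be rearranged
# into a palindrome iff at most one character has an odd count.
def _demo_palindrome_check() -> None:
    assert can_string_be_rearranged_as_palindrome("aab")  # "aba"
    assert can_string_be_rearranged_as_palindrome("Momo")  # case-insensitive: "momo" -> "moom"
    assert not can_string_be_rearranged_as_palindrome("abc")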
| 67 | '''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 67 | 1 |
'''simple docstring'''
def is_balanced(s) -> bool:
    stack = []
    open_brackets = set({'''(''', '''[''', '''{'''})
    closed_brackets = set({''')''', ''']''', '''}'''})
    open_to_closed = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input('''Enter sequence of brackets: ''')
    if is_balanced(s):
        print(s, '''is balanced''')
    else:
        print(s, '''is not balanced''')
if __name__ == "__main__":
main()
| 67 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __lowerCAmelCase ( UpperCamelCase__ ) -> list[list[float]]:
__lowerCamelCase = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(UpperCamelCase__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
__lowerCamelCase = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
__lowerCamelCase = [[0.0, 0.0], [0.0, 0.0]]
__lowerCamelCase , __lowerCamelCase = matrix[1][1], matrix[0][0]
__lowerCamelCase , __lowerCamelCase = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(UpperCamelCase__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(UpperCamelCase__ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
__lowerCamelCase = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
__lowerCamelCase = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
__lowerCamelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
__lowerCamelCase = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
__lowerCamelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
__lowerCamelCase = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
__lowerCamelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
__lowerCamelCase = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
__lowerCamelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
__lowerCamelCase = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
__lowerCamelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
__lowerCamelCase = array(UpperCamelCase__ )
for i in range(3 ):
for j in range(3 ):
__lowerCamelCase = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
__lowerCamelCase = array(UpperCamelCase__ )
for i in range(3 ):
for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
# Calculate the inverse of the matrix
    return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
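# A quick cross-check for the adjugate-based inverse above: for any
# invertible A, A @ inv(A) should be numerically close to the identity.
# numpy's LU-based inverse serves as the reference here.
import numpy as np

_A = np.array([[2.0, 5.0], [2.0, 0.0]])
assert np.allclose(_A @ np.linalg.inv(_A), np.eye(2))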
| 67 | 1 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None:
__lowerCamelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), f"""{len(dest_layers )} != {len(layers_to_copy )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
__UpperCAmelCase ={
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
__UpperCAmelCase ={
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
try:
__lowerCamelCase = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
f""" {n_student}""" )
return list(range(UpperCamelCase__ ) )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[int]:
if n_student > n_teacher:
raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(UpperCamelCase__ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = "student" , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
    __lowerCamelCase = '''encoder_layers and decoder_layers cannot both be None -- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
AutoTokenizer.from_pretrained(UpperCamelCase__ ).save_pretrained(UpperCamelCase__ ) # purely for convenience
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ ).eval()
else:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), f"""teacher must be a model or string got type {type(UpperCamelCase__ )}"""
__lowerCamelCase = teacher.config.to_diff_dict()
try:
__lowerCamelCase , __lowerCamelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__lowerCamelCase = teacher_e
if d is None:
__lowerCamelCase = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
__lowerCamelCase , __lowerCamelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__lowerCamelCase , __lowerCamelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__lowerCamelCase = teacher_e
if d is None:
__lowerCamelCase = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(UpperCamelCase__ )
# Copy weights
__lowerCamelCase = teacher.config_class(**UpperCamelCase__ )
__lowerCamelCase = AutoModelForSeqaSeqLM.from_config(UpperCamelCase__ )
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
__lowerCamelCase = student.load_state_dict(teacher.state_dict() , strict=UpperCamelCase__ )
    assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__lowerCamelCase , __lowerCamelCase = list(range(UpperCamelCase__ ) ), list(range(UpperCamelCase__ ) )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
f""" {save_path}""" )
student.save_pretrained(UpperCamelCase__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__lowerCamelCase = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ )
if d_layers_to_copy is None:
__lowerCamelCase = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ )
try:
if hasattr(
UpperCamelCase__ , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , UpperCamelCase__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , UpperCamelCase__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , UpperCamelCase__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , UpperCamelCase__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , UpperCamelCase__ )
copy_layers(teacher.decoder.block , student.decoder.block , UpperCamelCase__ )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__lowerCamelCase = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(UpperCamelCase__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
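# A minimal sketch (names are mine) of the layer-copying trick the script
# above relies on: selected teacher layers are loaded into a smaller student
# stack via a state_dict round trip.
import torch
from torch import nn

teacher = nn.ModuleList([nn.Linear(4, 4) for _ in range(12)])
layers_to_copy = [0, 6, 11]  # e.g. 12-layer teacher -> 3-layer student
student = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])
picked = nn.ModuleList([teacher[i] for i in layers_to_copy])
student.load_state_dict(picked.state_dict())
for layer, i in zip(student, layers_to_copy):
    assert torch.equal(layer.weight, teacher[i].weight)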
| 67 | '''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
def __lowerCAmelCase ( UpperCamelCase__=None , UpperCamelCase__=None ) -> int:
return field(default_factory=lambda: default , metadata=UpperCamelCase__ )
@dataclass
class a__ :
lowerCamelCase : List[str] =list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
lowerCamelCase : List[int] =list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
lowerCamelCase : List[int] =list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
lowerCamelCase : str =field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
lowerCamelCase : str =field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
lowerCamelCase : str =field(
default=F'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
lowerCamelCase : str =field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
lowerCamelCase : str =field(
default=F'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
lowerCamelCase : str =field(
default=F'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
lowerCamelCase : int =field(default=3 , metadata={"help": "Times an experiment will be run."} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , a , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
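# A small sketch of the list_field helper used above. dataclasses.field()
# rejects mutable defaults, so the list is wrapped in a factory; note that
# this lambda hands back the *same* list object to every instance.
from dataclasses import dataclass, field


def list_field_sketch(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class _Args:
    batch_sizes: list = list_field_sketch(default=[8, 32])


assert _Args().batch_sizes == [8, 32]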
| 67 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase ={
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["MobileViTFeatureExtractor"]
__UpperCAmelCase =["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
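# A self-contained sketch of the lazy-import pattern that _LazyModule
# implements above: attribute access triggers the real import only on first
# use. The demo maps one attribute onto the stdlib json module.
import importlib
import types


class _LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item):
        for submodule, names in self._import_structure.items():
            if item in names:
                return getattr(importlib.import_module(submodule), item)
        raise AttributeError(item)


_demo = _LazySketch("demo", {"json": ["dumps"]})
assert _demo.dumps({"a": 1}) == '{"a": 1}'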
| 67 | '''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCAmelCase =None
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase ={
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase ={
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
__UpperCAmelCase ="▁"
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[int] =VOCAB_FILES_NAMES
lowerCamelCase : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] =["input_ids", "attention_mask"]
lowerCamelCase : Union[str, Any] =BarthezTokenizer
def __init__( self : Optional[Any] , a : Dict=None , a : str=None , a : List[Any]="<s>" , a : Optional[int]="</s>" , a : List[str]="</s>" , a : Tuple="<s>" , a : str="<unk>" , a : Any="<pad>" , a : Union[str, Any]="<mask>" , **a : Union[str, Any] , ):
"""simple docstring"""
__lowerCamelCase = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )
__lowerCamelCase = vocab_file
__lowerCamelCase = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
__lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : List[int] , a : Optional[List[int]] = None ):
"""simple docstring"""
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : str , a : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
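# A pure-Python sketch of the special-token layout the tokenizer above
# produces (token IDs are illustrative, not the real Barthez vocabulary):
# <s> A </s> for a single sequence, <s> A </s></s> B </s> for a pair.
_CLS, _SEP = 0, 2


def _build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return [_CLS] + ids_a + [_SEP]
    return [_CLS] + ids_a + [_SEP, _SEP] + ids_b + [_SEP]


assert _build_inputs([7, 8]) == [0, 7, 8, 2]
assert _build_inputs([7], [9]) == [0, 7, 2, 2, 9, 2]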
| 67 | 1 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class a__ ( logging.LoggerAdapter ):
@staticmethod
def SCREAMING_SNAKE_CASE__ ( a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def SCREAMING_SNAKE_CASE__ ( self : int , a : Optional[int] , a : str , *a : Optional[int] , **a : List[Any] ):
"""simple docstring"""
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
__lowerCamelCase = kwargs.pop('''main_process_only''' , a )
__lowerCamelCase = kwargs.pop('''in_order''' , a )
if self.isEnabledFor(a ):
if self._should_log(a ):
__lowerCamelCase , __lowerCamelCase = self.process(a , a )
self.logger.log(a , a , *a , **a )
elif in_order:
__lowerCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__lowerCamelCase , __lowerCamelCase = self.process(a , a )
self.logger.log(a , a , *a , **a )
state.wait_for_everyone()
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = None ) -> Optional[int]:
if log_level is None:
__lowerCamelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , UpperCamelCase__ )
__lowerCamelCase = logging.getLogger(UpperCamelCase__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(UpperCamelCase__ , {} )
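# A hedged usage sketch for the adapter above; it assumes the accelerate
# package is installed. PartialState() must be initialized first, otherwise
# the adapter raises the RuntimeError shown above.
from accelerate import PartialState
from accelerate.logging import get_logger

PartialState()
logger = get_logger(__name__, log_level="INFO")
logger.info("printed once, on the main process only")
logger.info("every rank, in launch order", main_process_only=False, in_order=True)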
| 67 | '''simple docstring'''
def __lowerCAmelCase ( UpperCamelCase__ = 1_00_00_00 ) -> int:
__lowerCamelCase = set(range(3 , UpperCamelCase__ , 2 ) )
primes.add(2 )
for p in range(3 , UpperCamelCase__ , 2 ):
if p not in primes:
continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    __lowerCamelCase = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'{solution() = }')
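# An exact integer variant of the totient sieve above (the row accumulates
# phi as floats). phi[n] starts at n and is reduced once per prime divisor
# via phi[n] -= phi[n] // p, which is Euler's product formula in disguise.
def _totient_sum(limit: int) -> int:
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:  # p is prime: its entry is still untouched
            for n in range(p, limit + 1, p):
                phi[n] -= phi[n] // p
    return sum(phi[2:])


assert _totient_sum(8) == 21  # 1 + 2 + 2 + 4 + 2 + 6 + 4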
| 67 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : str ="transfo-xl"
lowerCamelCase : Optional[int] =["mems"]
lowerCamelCase : int ={
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Union[str, Any] , a : List[Any]=26_77_35 , a : Tuple=[2_00_00, 4_00_00, 20_00_00] , a : Tuple=10_24 , a : Tuple=10_24 , a : List[Any]=16 , a : str=64 , a : Optional[Any]=40_96 , a : List[Any]=4 , a : Tuple=False , a : List[str]=18 , a : int=16_00 , a : List[str]=10_00 , a : Union[str, Any]=True , a : Tuple=True , a : Tuple=0 , a : Any=-1 , a : Optional[Any]=True , a : Optional[int]=0.1 , a : Union[str, Any]=0.0 , a : List[Any]=True , a : Union[str, Any]="normal" , a : Dict=0.01 , a : Dict=0.01 , a : int=0.02 , a : int=1e-5 , a : str=0 , **a : Optional[int] , ):
"""simple docstring"""
__lowerCamelCase = vocab_size
__lowerCamelCase = []
self.cutoffs.extend(a )
if proj_share_all_but_first:
__lowerCamelCase = [False] + [True] * len(self.cutoffs )
else:
__lowerCamelCase = [False] + [False] * len(self.cutoffs )
__lowerCamelCase = d_model
__lowerCamelCase = d_embed
__lowerCamelCase = d_head
__lowerCamelCase = d_inner
__lowerCamelCase = div_val
__lowerCamelCase = pre_lnorm
__lowerCamelCase = n_layer
__lowerCamelCase = n_head
__lowerCamelCase = mem_len
__lowerCamelCase = same_length
__lowerCamelCase = attn_type
__lowerCamelCase = clamp_len
__lowerCamelCase = sample_softmax
__lowerCamelCase = adaptive
__lowerCamelCase = dropout
__lowerCamelCase = dropatt
__lowerCamelCase = untie_r
__lowerCamelCase = init
__lowerCamelCase = init_range
__lowerCamelCase = proj_init_std
__lowerCamelCase = init_std
__lowerCamelCase = layer_norm_epsilon
super().__init__(eos_token_id=a , **a )
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def SCREAMING_SNAKE_CASE__ ( self : str , a : List[str] ):
"""simple docstring"""
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 67 | '''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a__ :
def __init__( self : Union[str, Any] , a : Union[str, Any] , a : Tuple=13 , a : Optional[Any]=7 , a : List[Any]=True , a : Optional[Any]=True , a : Any=True , a : Union[str, Any]=99 , a : Any=32 , a : int=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Optional[Any]="gelu" , a : Union[str, Any]=0.1 , a : Any=0.1 , a : Optional[int]=5_12 , a : int=16 , a : Optional[Any]=2 , a : Union[str, Any]=0.02 , a : Any=3 , a : Dict=4 , a : Any=None , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : Dict , a : List[str] , a : Tuple , a : List[Any] , *a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTModel(config=a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , head_mask=a )
__lowerCamelCase = model(a , token_type_ids=a )
__lowerCamelCase = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Union[str, Any] , a : Dict , a : Union[str, Any] , a : Tuple , *a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTLMHeadModel(a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Tuple , a : Optional[int] , a : Union[str, Any] , a : Optional[Any] , *a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTDoubleHeadsModel(a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : int , a : Dict , a : Optional[Any] , a : str , *a : int ):
"""simple docstring"""
__lowerCamelCase = self.num_labels
__lowerCamelCase = OpenAIGPTForSequenceClassification(a )
model.to(a )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__lowerCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class a__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : List[str] =(
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCamelCase : str =(
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCamelCase : Optional[int] =(
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Tuple , a : Optional[int] , a : int , a : str , a : Any ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : int , a : Optional[int] , a : str=False ):
"""simple docstring"""
__lowerCamelCase = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a , )
__lowerCamelCase = inputs_dict['''labels''']
__lowerCamelCase = inputs_dict['''labels''']
__lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a , )
__lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=a , n_embd=37 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = OpenAIGPTModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_torch
class a__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(a )
__lowerCamelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=a ) # the president is
__lowerCamelCase = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__lowerCamelCase = model.generate(a , do_sample=a )
self.assertListEqual(output_ids[0].tolist() , a )
| 67 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
# Initialise PyTorch model
__lowerCamelCase = LxmertConfig.from_json_file(UpperCamelCase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
__lowerCamelCase = LxmertForPreTraining(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCAmelCase =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 67 | '''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[int] =["image_processor", "tokenizer"]
lowerCamelCase : Union[str, Any] ="LayoutLMv2ImageProcessor"
lowerCamelCase : int =("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[int] , a : Any=None , a : Any=None , **a : Union[str, Any] ):
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a , )
__lowerCamelCase = kwargs.pop('''feature_extractor''' )
__lowerCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a , a )
def __call__( self : Tuple , a : Optional[int] , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , a : Union[List[List[int]], List[List[List[int]]]] = None , a : Optional[Union[List[int], List[List[int]]]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : Tuple , ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
__lowerCamelCase = self.image_processor(images=a , return_tensors=a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(a , a ):
__lowerCamelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowerCamelCase = features['''words''']
__lowerCamelCase = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , )
# add pixel values
__lowerCamelCase = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__lowerCamelCase = self.get_overflowing_images(a , encoded_inputs['''overflow_to_sample_mapping'''] )
__lowerCamelCase = images
return encoded_inputs
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : Optional[Any] , a : str ):
"""simple docstring"""
__lowerCamelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(a ) != len(a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f""" {len(a )} and {len(a )}""" )
return images_with_overflow
def SCREAMING_SNAKE_CASE__ ( self : List[str] , *a : Optional[Any] , **a : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*a , **a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , *a : Union[str, Any] , **a : Tuple ):
"""simple docstring"""
return self.tokenizer.decode(*a , **a )
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a , )
return self.image_processor
| 67 | 1 |
'''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
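# A cleaned-up, runnable condensation of the shear-stress solver above
# (negative-value checks trimmed for brevity). Exactly one of the three
# quantities is passed as 0 and solved for via tau = F / A.
def _shear_stress(stress, tangential_force, area):
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    if stress == 0:
        return ("stress", tangential_force / area)
    if tangential_force == 0:
        return ("tangential_force", stress * area)
    return ("area", tangential_force / stress)


assert _shear_stress(0, 50, 10) == ("stress", 5.0)
assert _shear_stress(5, 0, 10) == ("tangential_force", 50)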
| 67 | '''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
class a__ ( UpperCAmelCase__ ):
def __init__( self : Optional[Any] , a : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = nn.ModuleList(a )
def SCREAMING_SNAKE_CASE__ ( self : Any , a : torch.FloatTensor , a : Union[torch.Tensor, float, int] , a : torch.Tensor , a : List[torch.tensor] , a : List[float] , a : Optional[torch.Tensor] = None , a : Optional[torch.Tensor] = None , a : Optional[torch.Tensor] = None , a : Optional[Dict[str, Any]] = None , a : bool = False , a : bool = True , ):
"""simple docstring"""
for i, (image, scale, controlnet) in enumerate(zip(a , a , self.nets ) ):
__lowerCamelCase , __lowerCamelCase = controlnet(
a , a , a , a , a , a , a , a , a , a , a , )
# merge samples
if i == 0:
__lowerCamelCase , __lowerCamelCase = down_samples, mid_sample
else:
__lowerCamelCase = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(a , a )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def SCREAMING_SNAKE_CASE__ ( self : Any , a : Union[str, os.PathLike] , a : bool = True , a : Callable = None , a : bool = False , a : Optional[str] = None , ):
"""simple docstring"""
__lowerCamelCase = 0
__lowerCamelCase = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
a , is_main_process=a , save_function=a , safe_serialization=a , variant=a , )
idx += 1
__lowerCamelCase = model_path_to_save + f"""_{idx}"""
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , a : Optional[Union[str, os.PathLike]] , **a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = 0
__lowerCamelCase = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
__lowerCamelCase = pretrained_model_path
while os.path.isdir(a ):
__lowerCamelCase = ControlNetModel.from_pretrained(a , **a )
controlnets.append(a )
idx += 1
__lowerCamelCase = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(a )} controlnets loaded from {pretrained_model_path}.""" )
if len(a ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(a )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(a )
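# A minimal sketch (helper name is mine) of the residual-merging step in the
# forward pass above: per-ControlNet residuals are summed elementwise before
# being handed back to the UNet.
import torch


def _merge_residuals(per_net_samples):
    merged = [s.clone() for s in per_net_samples[0]]
    for samples in per_net_samples[1:]:
        merged = [prev + cur for prev, cur in zip(merged, samples)]
    return merged


_per_net = [[torch.ones(2, 2)], [torch.full((2, 2), 2.0)]]
assert torch.equal(_merge_residuals(_per_net)[0], torch.full((2, 2), 3.0))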
| 67 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__UpperCAmelCase =logging.get_logger("transformers.models.speecht5")
__UpperCAmelCase ={
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__UpperCAmelCase ={
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__UpperCAmelCase ={
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__UpperCAmelCase ={
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__UpperCAmelCase ={
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__UpperCAmelCase ={
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
__UpperCAmelCase ={
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__UpperCAmelCase ={
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
__UpperCAmelCase ={
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__UpperCAmelCase ={
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__UpperCAmelCase ={
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__UpperCAmelCase =[]
__UpperCAmelCase =[
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
__UpperCAmelCase =IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
__UpperCAmelCase =IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
__UpperCAmelCase =IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
for attribute in key.split('''.''' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
elif weight_type == "running_mean":
__lowerCamelCase = value
elif weight_type == "running_var":
__lowerCamelCase = value
elif weight_type == "num_batches_tracked":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__lowerCamelCase , __lowerCamelCase = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
__lowerCamelCase = []
if task == "s2t":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2T
__lowerCamelCase = IGNORE_KEYS_S2T
elif task == "t2s":
__lowerCamelCase = None
__lowerCamelCase = MAPPING_T2S
__lowerCamelCase = IGNORE_KEYS_T2S
elif task == "s2s":
__lowerCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__lowerCamelCase = MAPPING_S2S
__lowerCamelCase = IGNORE_KEYS_S2S
else:
raise ValueError(f"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(UpperCamelCase__ , UpperCamelCase__ ):
logger.info(f"""{name} was ignored""" )
continue
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__lowerCamelCase , __lowerCamelCase = key.split('''.*.''' )
if prefix in name and suffix in name:
__lowerCamelCase = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(UpperCamelCase__ )[0].split('''.''' )[-2]
__lowerCamelCase = mapped_key.replace('''*''' , UpperCamelCase__ )
if "weight_g" in name:
__lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
__lowerCamelCase = '''weight_v'''
elif "bias" in name:
__lowerCamelCase = '''bias'''
elif "weight" in name:
__lowerCamelCase = '''weight'''
elif "running_mean" in name:
__lowerCamelCase = '''running_mean'''
elif "running_var" in name:
__lowerCamelCase = '''running_var'''
elif "num_batches_tracked" in name:
__lowerCamelCase = '''num_batches_tracked'''
else:
__lowerCamelCase = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
__lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
__lowerCamelCase = name.split('''.''' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ) -> int:
if config_path is not None:
__lowerCamelCase = SpeechTaConfig.from_pretrained(UpperCamelCase__ )
else:
__lowerCamelCase = SpeechTaConfig()
if task == "s2t":
__lowerCamelCase = config.max_text_positions
__lowerCamelCase = SpeechTaForSpeechToText(UpperCamelCase__ )
elif task == "t2s":
__lowerCamelCase = 18_76
__lowerCamelCase = 6_00
__lowerCamelCase = config.max_speech_positions
__lowerCamelCase = SpeechTaForTextToSpeech(UpperCamelCase__ )
elif task == "s2s":
__lowerCamelCase = 18_76
__lowerCamelCase = config.max_speech_positions
__lowerCamelCase = SpeechTaForSpeechToSpeech(UpperCamelCase__ )
else:
raise ValueError(f"""Unknown task name: {task}""" )
if vocab_path:
__lowerCamelCase = SpeechTaTokenizer(UpperCamelCase__ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
__lowerCamelCase = AddedToken('''<mask>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
__lowerCamelCase = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
__lowerCamelCase = SpeechTaFeatureExtractor()
__lowerCamelCase = SpeechTaProcessor(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
__lowerCamelCase = torch.load(UpperCamelCase__ )
recursively_load_weights(fairseq_checkpoint['''model'''] , UpperCamelCase__ , UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(UpperCamelCase__ )
model.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__UpperCAmelCase =parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
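# A small sketch of the dotted-key traversal that set_recursively performs
# above (helper name is mine): getattr is chained down the module tree
# before the tensor is written in place, with a shape check first.
import torch
from torch import nn


def _set_by_path(model, dotted_key, value):
    obj = model
    for attr in dotted_key.split("."):
        obj = getattr(obj, attr)
    if obj.data.shape != value.shape:
        raise ValueError(f"shape mismatch for {dotted_key}")
    obj.data = value


_net = nn.Sequential(nn.Linear(2, 2))
_set_by_path(_net, "0.bias", torch.zeros(2))
assert torch.equal(_net[0].bias.data, torch.zeros(2))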
| 67 | '''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
__UpperCAmelCase =logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class a__ ( UpperCAmelCase__ ):
def __init__( self : List[str] , *a : Union[str, Any] , **a : Optional[Any] ):
"""simple docstring"""
super().__init__(*a , **a )
requires_backends(self , '''vision''' )
self.check_model_type(a )
def __call__( self : Any , a : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a : Optional[int] ):
"""simple docstring"""
return super().__call__(a , **a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **a : Any ):
"""simple docstring"""
return {}, {}, {}
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : List[str] ):
"""simple docstring"""
__lowerCamelCase = load_image(a )
__lowerCamelCase = image.size
__lowerCamelCase = self.image_processor(images=a , return_tensors=self.framework )
return model_inputs
def SCREAMING_SNAKE_CASE__ ( self : int , a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = self.model(**a )
return model_outputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : Any ):
"""simple docstring"""
__lowerCamelCase = model_outputs.predicted_depth
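        # resize the raw depth map back to the input image's (height, width); PIL's
        # Image.size is (width, height), hence the [::-1] reversal below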
__lowerCamelCase = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=a )
__lowerCamelCase = prediction.squeeze().cpu().numpy()
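        # rescale to the 0-255 range so the depth map can be rendered as an 8-bit grayscale image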
__lowerCamelCase = (output * 2_55 / np.max(a )).astype('''uint8''' )
__lowerCamelCase = Image.fromarray(a )
__lowerCamelCase = {}
__lowerCamelCase = predicted_depth
__lowerCamelCase = depth
return output_dict
| 67 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(UpperCAmelCase__ ) , "Tatoeba directory does not exist." )
class a__ ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=a )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=a )
assert mmeta["long_pair"] == "heb-eng"
| 67 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase ={
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__UpperCAmelCase =["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 1 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
__lowerCamelCase = flax_key_tuple[:-1] + ('''weight''',)
__lowerCamelCase = torch.permute(UpperCamelCase__ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(UpperCamelCase__ ):
# linear layer
__lowerCamelCase = flax_key_tuple[:-1] + ('''weight''',)
__lowerCamelCase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__lowerCamelCase = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
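    # split a flattened checkpoint key into (real layer name, remaining sub-path, content),
    # giving "metadata" and "kvstore" entries special handling so the tensorstore spec stays intact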
if "metadata" in layer:
__lowerCamelCase = layer.split('''metadata''' )
__lowerCamelCase = ''''''.join(split_layer[0] )[:-1]
__lowerCamelCase = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
__lowerCamelCase = layer.split('''kvstore''' )
__lowerCamelCase = ''''''.join(split_layer[0] )[:-1]
__lowerCamelCase = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
__lowerCamelCase = layer.split('''/''' )
__lowerCamelCase = '''/'''.join(split_layer[:-1] )
__lowerCamelCase = (split_layer[-1],)
if "kvstore/path" in layer:
__lowerCamelCase = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
__lowerCamelCase = '''file'''
else:
__lowerCamelCase = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
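    # apply the key-renaming map to the current shard and serialize it to disk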
__lowerCamelCase = rename_keys(UpperCamelCase__ )
__lowerCamelCase = {}
for k, v in current_block.items():
__lowerCamelCase = v
__lowerCamelCase = new_current_block
torch.save(UpperCamelCase__ , UpperCamelCase__ )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = WEIGHTS_NAME ) -> Tuple:
__lowerCamelCase = convert_file_size_to_int(UpperCamelCase__ )
__lowerCamelCase = []
__lowerCamelCase = {}
__lowerCamelCase = 0
__lowerCamelCase = 0
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
__lowerCamelCase = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
__lowerCamelCase = flatten_dict(UpperCamelCase__ , sep='''/''' )
__lowerCamelCase = {}
for layer in checkpoint_info.keys():
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = get_key_and_tensorstore_dict(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if curr_real_layer_name in all_layers:
__lowerCamelCase = content
else:
__lowerCamelCase = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
__lowerCamelCase = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
__lowerCamelCase = torch.tensor(UpperCamelCase__ )
__lowerCamelCase = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
__lowerCamelCase , __lowerCamelCase = rename_base_flax_keys(tuple(key.split('''/''' ) ) , UpperCamelCase__ )
__lowerCamelCase = '''/'''.join(UpperCamelCase__ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
__lowerCamelCase = os.path.join(
UpperCamelCase__ , weights_name.replace('''.bin''' , f"""-{len(UpperCamelCase__ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(UpperCamelCase__ , UpperCamelCase__ )
sharded_state_dicts.append(current_block.keys() )
del current_block
__lowerCamelCase = {}
__lowerCamelCase = 0
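        # cast the tensor to the requested dtype (e.g. torch.bfloat16) before storing it in the block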
__lowerCamelCase = raw_weights.to(getattr(UpperCamelCase__ , UpperCamelCase__ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
__lowerCamelCase = os.path.join(UpperCamelCase__ , weights_name.replace('''.bin''' , f"""-{len(UpperCamelCase__ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(UpperCamelCase__ , UpperCamelCase__ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(UpperCamelCase__ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
__lowerCamelCase = {}
__lowerCamelCase = {}
for idx, shard in enumerate(UpperCamelCase__ ):
__lowerCamelCase = weights_name.replace(
'''.bin''' , f"""-{idx+1:05d}-of-{len(UpperCamelCase__ ):05d}.bin""" ) # len(sharded_state_dicts):05d}
__lowerCamelCase = os.path.join(UpperCamelCase__ , weights_name.replace('''.bin''' , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
__lowerCamelCase = shard
for key in shard:
__lowerCamelCase = shard_file
# Add the metadata
__lowerCamelCase = {'''total_size''': total_size}
__lowerCamelCase = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' , encoding='''utf-8''' ) as f:
__lowerCamelCase = json.dumps(UpperCamelCase__ , indent=2 , sort_keys=UpperCamelCase__ ) + '''\n'''
f.write(UpperCamelCase__ )
return metadata, index
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCAmelCase =parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
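# Quick end-to-end sanity check for a converted small checkpoint; it is defined after the
# __main__ block above, so nothing invokes it when the script runs from the CLI.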
def __lowerCAmelCase ( ) -> List[Any]:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
__lowerCamelCase = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
__lowerCamelCase = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
__lowerCamelCase = TaTokenizer.from_pretrained('''t5-small''' )
__lowerCamelCase = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
__lowerCamelCase = tokenizer(UpperCamelCase__ , return_tensors='''pt''' ).input_ids
__lowerCamelCase = model.generate(UpperCamelCase__ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 67 | '''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = '''ylacombe/bark-small'''
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = '''en_speaker_1'''
__lowerCamelCase = '''This is a test string'''
__lowerCamelCase = '''speaker_embeddings_path.json'''
__lowerCamelCase = '''speaker_embeddings'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **a : Dict ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = BarkProcessor(tokenizer=a )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCamelCase = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__lowerCamelCase = 35
__lowerCamelCase = 2
__lowerCamelCase = 8
__lowerCamelCase = {
'''semantic_prompt''': np.ones(a ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__lowerCamelCase = processor(text=self.input_string , voice_preset=a )
__lowerCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__lowerCamelCase = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(a , **a )
__lowerCamelCase = processor(text=self.input_string , voice_preset=a )
__lowerCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__lowerCamelCase = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = BarkProcessor(tokenizer=a )
__lowerCamelCase = processor(text=self.input_string )
__lowerCamelCase = tokenizer(
self.input_string , padding='''max_length''' , max_length=2_56 , add_special_tokens=a , return_attention_mask=a , return_token_type_ids=a , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 67 | 1 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=5_1_2,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
def __lowerCAmelCase ( UpperCamelCase__ ) -> Tuple:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
__UpperCAmelCase =parser.parse_args()
__UpperCAmelCase =download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 67 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase ={"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__UpperCAmelCase =["gpt2"]
__UpperCAmelCase ="gpt2"
if is_tf_available():
class a__ ( tf.Module ):
def __init__( self : str , a : Union[str, Any] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = tokenizer
__lowerCamelCase = AutoConfig.from_pretrained(a )
__lowerCamelCase = TFGPTaLMHeadModel.from_config(a )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
def SCREAMING_SNAKE_CASE__ ( self : str , a : Tuple ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer(a )
__lowerCamelCase = tokenized['''input_ids'''].to_tensor()
__lowerCamelCase = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__lowerCamelCase = self.model(input_ids=a , attention_mask=a )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
__lowerCamelCase = [GPTaTokenizer.from_pretrained(a ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__lowerCamelCase = [TFGPTaTokenizer.from_pretrained(a ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__lowerCamelCase = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
__lowerCamelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__lowerCamelCase = tokenizer([test_inputs] , return_tensors='''tf''' )
__lowerCamelCase = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__lowerCamelCase = python_outputs[key].numpy()
__lowerCamelCase = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(a , tf.intaa ) == tf_outputs_values ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = tf.function(a )
for test_inputs in self.test_sentences:
__lowerCamelCase = tf.constant(a )
__lowerCamelCase = compiled_tokenizer(a )
__lowerCamelCase = tf_tokenizer(a )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = ModelToSave(tokenizer=a )
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = model.serving(a ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowerCamelCase = Path(a ) / '''saved.model'''
tf.saved_model.save(a , a , signatures={'''serving_default''': model.serving} )
__lowerCamelCase = tf.saved_model.load(a )
__lowerCamelCase = loaded_model.signatures['''serving_default'''](a )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = tf_tokenizer(a ) # Build model with some sample inputs
__lowerCamelCase = tf_tokenizer.get_config()
__lowerCamelCase = TFGPTaTokenizer.from_config(a )
__lowerCamelCase = model_from_config(a )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__lowerCamelCase = 12_31_23
for max_length in [3, 5, 10_24]:
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = tf_tokenizer(a , max_length=a )
__lowerCamelCase = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 67 | '''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__UpperCAmelCase =True
except (ImportError, ModuleNotFoundError):
__UpperCAmelCase =False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def __lowerCAmelCase ( UpperCamelCase__ ) -> str:
    __lowerCamelCase = re.sub('''<n>''' , '''''' , UpperCamelCase__ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__lowerCamelCase ) )
| 67 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase ={
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[int] ="gpt_neox_japanese"
def __init__( self : List[Any] , a : Tuple=3_20_00 , a : Dict=25_60 , a : Union[str, Any]=32 , a : Dict=32 , a : Dict=4 , a : Optional[Any]="gelu" , a : Any=1.00 , a : str=1_00_00 , a : List[str]=20_48 , a : str=0.02 , a : Union[str, Any]=1e-5 , a : Optional[Any]=True , a : str=3_19_96 , a : List[str]=3_19_99 , a : str=0.1 , a : Union[str, Any]=0.0 , **a : Optional[Any] , ):
"""simple docstring"""
super().__init__(bos_token_id=a , eos_token_id=a , **a )
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_multiple_size
__lowerCamelCase = hidden_act
__lowerCamelCase = rotary_pct
__lowerCamelCase = rotary_emb_base
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = use_cache
__lowerCamelCase = attention_dropout
__lowerCamelCase = hidden_dropout
| 67 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase ={"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure)
| 67 | '''simple docstring'''
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
    while second != 0:
        # the carry marks the bit positions where both numbers have a 1
        __lowerCamelCase = first & second
        # XOR sums the bits without propagating the carry
        first ^= second
        # shift the carry left by one so it is added in on the next iteration
        __lowerCamelCase = c << 1
    return first
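# Note: with Python's arbitrary-precision integers the carry is never truncated away,
# so mixed-sign inputs (e.g. first=-1, second=1) shift the carry left forever; the
# helper is only safe for non-negative integers.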
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase =int(input("Enter the first number: ").strip())
__UpperCAmelCase =int(input("Enter the second number: ").strip())
print(f'{add(first, second) = }')
| 67 | 1 |
'''simple docstring'''
import requests
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> None:
__lowerCamelCase = {'''Content-Type''': '''application/json'''}
__lowerCamelCase = requests.post(UpperCamelCase__ , json={'''text''': message_body} , headers=UpperCamelCase__ )
if response.status_code != 2_00:
__lowerCamelCase = (
'''Request to slack returned an error '''
f"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(UpperCamelCase__ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 67 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase ={
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a__ ( UpperCAmelCase__ ):
def __init__( self : List[Any] , a : List[Any] , a : Dict ):
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
__lowerCamelCase = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=a , scheduler=a )
@torch.no_grad()
def __call__( self : List[Any] , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : float = 0.0 , a : int = 50 , a : Optional[bool] = None , a : Optional[str] = "pil" , a : bool = True , ):
"""simple docstring"""
if isinstance(self.unet.config.sample_size , a ):
__lowerCamelCase = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
__lowerCamelCase = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__lowerCamelCase = randn_tensor(a , generator=a , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(a )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__lowerCamelCase = self.unet(a , a ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(
a , a , a , eta=a , use_clipped_model_output=a , generator=a ).prev_sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
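# Hypothetical usage sketch (pipeline and variable names assumed, not part of this snippet):
#   pipe = DDIMPipeline(unet=unet, scheduler=scheduler)
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]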
| 67 | '''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None:
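    # copy the teacher layers selected by index into the student's (smaller) ModuleList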
__lowerCamelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ), f"""{len(UpperCamelCase__ )} != {len(UpperCamelCase__ )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
__UpperCAmelCase ={
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
__UpperCAmelCase ={
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
try:
__lowerCamelCase = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
f""" {n_student}""" )
return list(range(UpperCamelCase__ ) )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[int]:
if n_student > n_teacher:
raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(UpperCamelCase__ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = "student" , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__lowerCamelCase = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
AutoTokenizer.from_pretrained(UpperCamelCase__ ).save_pretrained(UpperCamelCase__ ) # purely for convenience
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ ).eval()
else:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), f"""teacher must be a model or string got type {type(UpperCamelCase__ )}"""
__lowerCamelCase = teacher.config.to_diff_dict()
try:
__lowerCamelCase , __lowerCamelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__lowerCamelCase = teacher_e
if d is None:
__lowerCamelCase = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
__lowerCamelCase , __lowerCamelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__lowerCamelCase , __lowerCamelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__lowerCamelCase = teacher_e
if d is None:
__lowerCamelCase = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(UpperCamelCase__ )
# Copy weights
__lowerCamelCase = teacher.config_class(**UpperCamelCase__ )
__lowerCamelCase = AutoModelForSeqaSeqLM.from_config(UpperCamelCase__ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__lowerCamelCase = student.load_state_dict(teacher.state_dict() , strict=UpperCamelCase__ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__lowerCamelCase , __lowerCamelCase = list(range(UpperCamelCase__ ) ), list(range(UpperCamelCase__ ) )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
f""" {save_path}""" )
student.save_pretrained(UpperCamelCase__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__lowerCamelCase = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ )
if d_layers_to_copy is None:
__lowerCamelCase = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ )
try:
if hasattr(
UpperCamelCase__ , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , UpperCamelCase__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , UpperCamelCase__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , UpperCamelCase__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , UpperCamelCase__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , UpperCamelCase__ )
copy_layers(teacher.decoder.block , student.decoder.block , UpperCamelCase__ )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__lowerCamelCase = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(UpperCamelCase__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 67 | 1 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
__UpperCAmelCase =[
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
def __lowerCAmelCase ( UpperCamelCase__ ) -> Tuple:
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
__UpperCAmelCase =parser.parse_args()
if args.check_lib:
__UpperCAmelCase =importlib.import_module("transformers")
__UpperCAmelCase =Path(transformers_module.__file__).parent
else:
__UpperCAmelCase =Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 67 | '''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__UpperCAmelCase =["gpt2"]
__UpperCAmelCase ="gpt2"
if is_tf_available():
class a__ ( tf.Module ):
def __init__( self : str , a : Union[str, Any] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = tokenizer
__lowerCamelCase = AutoConfig.from_pretrained(a )
__lowerCamelCase = TFGPTaLMHeadModel.from_config(a )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
def SCREAMING_SNAKE_CASE__ ( self : str , a : Tuple ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer(a )
__lowerCamelCase = tokenized['''input_ids'''].to_tensor()
__lowerCamelCase = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__lowerCamelCase = self.model(input_ids=a , attention_mask=a )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
__lowerCamelCase = [GPTaTokenizer.from_pretrained(a ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__lowerCamelCase = [TFGPTaTokenizer.from_pretrained(a ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__lowerCamelCase = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
__lowerCamelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__lowerCamelCase = tokenizer([test_inputs] , return_tensors='''tf''' )
__lowerCamelCase = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__lowerCamelCase = python_outputs[key].numpy()
__lowerCamelCase = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(a , tf.intaa ) == tf_outputs_values ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = tf.function(a )
for test_inputs in self.test_sentences:
__lowerCamelCase = tf.constant(a )
__lowerCamelCase = compiled_tokenizer(a )
__lowerCamelCase = tf_tokenizer(a )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = ModelToSave(tokenizer=a )
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = model.serving(a ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowerCamelCase = Path(a ) / '''saved.model'''
tf.saved_model.save(a , a , signatures={'''serving_default''': model.serving} )
__lowerCamelCase = tf.saved_model.load(a )
__lowerCamelCase = loaded_model.signatures['''serving_default'''](a )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = tf_tokenizer(a ) # Build model with some sample inputs
__lowerCamelCase = tf_tokenizer.get_config()
__lowerCamelCase = TFGPTaTokenizer.from_config(a )
__lowerCamelCase = model_from_config(a )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__lowerCamelCase = 12_31_23
for max_length in [3, 5, 10_24]:
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = tf_tokenizer(a , max_length=a )
__lowerCamelCase = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 67 | 1 |