code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a Gabor filter kernel of size ``ksize`` for orientation ``theta`` (degrees)."""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
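    # Usage sketch (not from the original script; assumes matplotlib is installed):
    # inspect a single kernel directly instead of filtering an image.
    #   import matplotlib.pyplot as plt
    #   plt.imshow(gabor_filter_kernel(21, 8, 45, 10, 0, 0), cmap="gray")
    #   plt.show()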
| 10 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
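# Standalone sketch (not part of the test class): the config returned by
# get_scheduler_config() maps directly onto the scheduler constructor, so the same
# scheduler can be exercised outside the test harness. Tensor shapes are illustrative.
#   scheduler = DDPMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)
#   noise = torch.randn_like(sample)
#   noisy = scheduler.add_noise(sample, noise, scheduler.timesteps[:1])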
| 10 | 1 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 364 |

from math import pi


def arc_length(angle: int, radius: int) -> float:
    """Calculate the length of an arc spanning ``angle`` degrees on a circle of the given radius."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
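    # Quick check (sketch): a 90-degree arc on a radius-10 circle is a quarter of the
    # circumference, i.e. 5 * pi:
    #   >>> round(arc_length(90, 10), 3)
    #   15.708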
| 165 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
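# Example invocation (sketch; the script filename and paths are placeholders, not
# files shipped with the repo):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_tf/model.ckpt \
#       --config_file ./lxmert_tf/config.json \
#       --pytorch_dump_path ./lxmert_pytorch/pytorch_model.bin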
| 26 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when an unavailable format type or one of its aliases is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Get a formatter object from its name or one of its aliases."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )

| 312 | 0 |
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(
    force: float, charge1: float, charge2: float, distance: float
) -> dict[str, float]:
    """
    Apply Coulomb's law to the three given non-zero values (force, charge1, charge2,
    or distance) and return the name/value pair of the value that was passed as zero.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
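    # Worked example (sketch): with charge1 = charge2 = 1 C at a distance of 1 m, the
    # force equals the constant itself:
    #   >>> coulombs_law(0, 1, 1, 1)
    #   {'force': 8988000000.0}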
| 233 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
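    # Usage sketch: summing a three-node tree through the iterator protocol.
    #   >>> root = Node(10)
    #   >>> root.left, root.right = Node(5), Node(-3)
    #   >>> sum(BinaryTreeNodeSum(root))
    #   12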
| 233 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1024,
"gpt2-medium": 1024,
"gpt2-large": 1024,
"gpt2-xl": 1024,
"distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
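# Usage sketch (assumes the standard "gpt2" checkpoint is reachable on the Hub):
#   from transformers import GPT2TokenizerFast
#   tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
#   tokenizer("hello world")["input_ids"]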
| 0 |
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # wait for all of the rank_*.json files to appear
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
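    # e.g. python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
    #     --model_name Helsinki-NLP/opus-mt-en-ro --data_dir wmt_en_ro --save_dir en_ro_gens --bs 16
    # (sketch: the launcher flags, script name, and paths above are illustrative assumptions)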
run_generate()
| 311 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )

| 353 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2SeqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()

| 33 | 0 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 170 |
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of the given magnitude and direction into its x and y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check if a system of forces is in static equilibrium (net moment approximately zero)."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
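    # Counter-example (sketch): one force with a lever arm yields a nonzero net
    # moment, so the same check fails.
    #   >>> in_static_equilibrium(array([[1, 0]]), array([[0, 1]]))
    #   False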
| 170 | 1 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
| 179 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 179 | 1 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Check whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F"{solution() = }")
| 288 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> int:
import requests
monkeypatch.setattr(__lowerCamelCase , '''request''' , __lowerCamelCase )
_snake_case = URL
if issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = url
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [url]
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = {'''train''': url}
_snake_case = '''dummy'''
_snake_case = '''downloads'''
_snake_case = tmp_path
_snake_case = DownloadConfig(
cache_dir=os.path.join(__lowerCamelCase , __lowerCamelCase ) , use_etag=__lowerCamelCase , )
_snake_case = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase )
_snake_case = dl_manager.download(__lowerCamelCase )
_snake_case = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [downloaded_paths]
_snake_case = [urls]
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
assert "train" in downloaded_paths.keys()
_snake_case = downloaded_paths.values()
_snake_case = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__lowerCamelCase , __lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case = Path(__lowerCamelCase )
_snake_case = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case = downloaded_path.read_text()
assert content == CONTENT
_snake_case = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
_snake_case = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ) -> int:
_snake_case = str(__lowerCamelCase )
if issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = filename
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [filename]
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = {'''train''': filename}
_snake_case = '''dummy'''
_snake_case = xz_file.parent
_snake_case = '''extracted'''
_snake_case = DownloadConfig(
cache_dir=__lowerCamelCase , use_etag=__lowerCamelCase , )
_snake_case = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase )
_snake_case = dl_manager.extract(__lowerCamelCase )
_snake_case = paths
for extracted_paths in [extracted_paths]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [extracted_paths]
_snake_case = [paths]
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
assert "train" in extracted_paths.keys()
_snake_case = extracted_paths.values()
_snake_case = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__lowerCamelCase , __lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case = Path(__lowerCamelCase )
_snake_case = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__lowerCamelCase , etag=__lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case = extracted_path.read_text()
_snake_case = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
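# A minimal sketch (illustrative, not part of the suite; the helper name is
# hypothetical) of the cache layout the download test above encodes: files
# land under <cache_dir>/downloads/<hash_url_to_filename(url)>, with a
# sidecar .json holding the url and etag.
def _expected_download_path(cache_dir, url, etag=None):
    return os.path.join(cache_dir, "downloads", hash_url_to_filename(url, etag=etag))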
| 288 | 1 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
SCREAMING_SNAKE_CASE : Tuple = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
SCREAMING_SNAKE_CASE : Tuple = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
SCREAMING_SNAKE_CASE : Optional[Any] = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"
def simple_accuracy(preds, labels):
    """Fraction of predictions matching the labels."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    """Accuracy plus F1 score."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval on mean-centered vectors."""
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
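# Worked example (illustrative): with identical English and Indic sentence
# vectors, each row's nearest neighbour is itself, so
#   precision_at_10([[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]],
#                   [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]) == 1.0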
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )
    def _compute(self, predictions, references):
        """Route to the right scoring function for the configured task."""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
| 350 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations summing to target, by plain recursion."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, with memoization over the remaining target."""

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, computed iteratively bottom-up."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
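    # Cross-check (illustrative): all three implementations above should
    # agree; for array=[1, 2, 5] and target=5 the count of ordered
    # combinations is 9.
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )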
    print(combination_sum_iv(n, array, target))
| 317 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
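# What the _LazyModule registration above buys (a stripped-down sketch of the
# same idea; hypothetical names, for illustration only):
#
#     import importlib
#
#     class _Lazy:
#         def __getattr__(self, name):
#             module = importlib.import_module(".modeling_vit_mae", __package__)
#             return getattr(module, name)
#
# Attribute access triggers the heavy torch/TF imports only on first use.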
| 18 |
def interpolation_search(sorted_collection, item):
    """Search ``item`` in an ascending sorted collection by interpolation."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        elif point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure recursive counterpart of ``interpolation_search``."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    elif sorted_collection[point] > item:
        return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
    else:
        return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError if ``collection`` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
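    # Cross-check against the recursive variant (illustrative): 45 sits at
    # index 3 of the sample collection.
    assert interpolation_search_by_recursion(collection, 45, 0, len(collection) - 1) == 3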
| 7 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase: Optional[Any] = logging.get_logger(__name__)
_lowercase: Any = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the GIT vision tower."""

    model_type = "git_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the vision config, unwrapping it from a full GIT config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for the full GIT model (text decoder plus vision tower)."""

    model_type = "git"

    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs, ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        """Serialize this instance, including the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
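# Round-trip sketch (illustrative): a GIT config serializes its nested vision
# tower, so custom vision settings survive ``to_dict``.
if __name__ == "__main__":
    config = GitConfig(vision_config={"hidden_size": 512})
    assert config.to_dict()["vision_config"]["hidden_size"] == 512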
| 350 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def UpperCamelCase_ (self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def UpperCamelCase_ (self ):
"""simple docstring"""
        token = "[PAD]"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def UpperCamelCase_ (self ):
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)
def UpperCamelCase_ (self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def UpperCamelCase_ (self ):
"""simple docstring"""
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def UpperCamelCase_ (self ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
        text = "Hello World!"
        input_ids = [35389, 6672, 49, 2]
        self.assertListEqual(input_ids, self.big_tokenizer.encode(text))
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 71 | 0 |
'''simple docstring'''
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__UpperCAmelCase = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    """Build a readable test name from the parameterized arguments."""
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
"""simple docstring"""
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False, )

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True, )
    def do_checks(self, output_dir):
        """Placeholder: add quality checks on the saved checkpoint here."""
        pass

    def run_and_check(self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, fp16: bool = True, quality_checks: bool = True, ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16, )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True, ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
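# Illustration (runnable at import; uses only the constants above): the
# parameterized matrix expands to four (stage, model) cases.
assert params == [("zero2", "base"), ("zero2", "robust"), ("zero3", "base"), ("zero3", "robust")]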
| 323 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
pass
def hashimage(image):
    """Return the md5 hex digest of a PIL image's raw bytes."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        """Instantiate the pipeline under test plus example inputs."""
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
            ], outputs, )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
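    # Minimal end-to-end usage sketch (assumes network access and the
    # Intel/dpt-large checkpoint; illustrative only):
    #
    #   depth = pipeline("depth-estimation", model="Intel/dpt-large")
    #   out = depth("http://images.cocodataset.org/val2017/000000039769.jpg")
    #   out["depth"].save("depth.png")      # PIL image
    #   out["predicted_depth"]              # raw torch.Tensor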
| 323 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)
def snake_case ( self : Union[str, Any], lowerCamelCase : Any )-> Optional[Any]:
self.assertTrue(np.all(np.mean(lowerCamelCase, axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase, axis=0 ) - 1 ) < 1E-3 ) )
def snake_case ( self : Dict )-> str:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCamelCase__ : Optional[Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase__ : Any =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
lowerCamelCase__ : List[Any] =[np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
lowerCamelCase__ : Union[str, Any] =feat_extract(speech_inputs[0], return_tensors='''np''' ).input_values
lowerCamelCase__ : List[Any] =feat_extract(np_speech_inputs[0], return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3 ) )
# Test batched
lowerCamelCase__ : List[Any] =feat_extract(lowerCamelCase, return_tensors='''np''' ).input_values
lowerCamelCase__ : Optional[int] =feat_extract(lowerCamelCase, return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase, lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase__ : Optional[Any] =[floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase__ : Optional[Any] =np.asarray(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =feat_extract(lowerCamelCase, return_tensors='''np''' ).input_values
lowerCamelCase__ : Union[str, Any] =feat_extract(lowerCamelCase, return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase, lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3 ) )
def snake_case ( self : Optional[int] )-> Tuple:
lowerCamelCase__ : Union[str, Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : List[Any] =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
lowerCamelCase__ : Dict =['''longest''', '''max_length''', '''do_not_pad''']
lowerCamelCase__ : Any =[None, 1600, None]
for max_length, padding in zip(lowerCamelCase, lowerCamelCase ):
lowerCamelCase__ : int =feat_extract(lowerCamelCase, padding=lowerCamelCase, max_length=lowerCamelCase, return_tensors='''np''' )
lowerCamelCase__ : Any =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case ( self : Any )-> Optional[Any]:
lowerCamelCase__ : List[str] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Optional[int] =range(800, 1400, 200 )
lowerCamelCase__ : Optional[Any] =[floats_list((1, x) )[0] for x in lengths]
lowerCamelCase__ : Optional[int] =['''longest''', '''max_length''', '''do_not_pad''']
lowerCamelCase__ : List[Any] =[None, 1600, None]
for max_length, padding in zip(lowerCamelCase, lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] =feat_extract(lowerCamelCase, max_length=lowerCamelCase, padding=lowerCamelCase )
lowerCamelCase__ : List[Any] =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case ( self : str )-> Tuple:
lowerCamelCase__ : List[Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Union[str, Any] =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
lowerCamelCase__ : str =feat_extract(
lowerCamelCase, truncation=lowerCamelCase, max_length=1000, padding='''max_length''', return_tensors='''np''' )
lowerCamelCase__ : str =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case ( self : List[Any] )-> List[str]:
lowerCamelCase__ : Tuple =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Tuple =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
lowerCamelCase__ : Any =feat_extract(
lowerCamelCase, truncation=lowerCamelCase, max_length=1000, padding='''longest''', return_tensors='''np''' )
lowerCamelCase__ : Union[str, Any] =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
lowerCamelCase__ : Optional[int] =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
lowerCamelCase__ : Any =feat_extract(
lowerCamelCase, truncation=lowerCamelCase, max_length=2000, padding='''longest''', return_tensors='''np''' )
lowerCamelCase__ : List[str] =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def snake_case ( self : Dict )-> str:
import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
@slow
@require_torch
def snake_case ( self : int )-> List[Any]:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
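def _zero_mean_unit_var(x):
    """Reference normalization (illustrative sketch) intended to mirror the
    per-utterance zero-mean/unit-variance property checked throughout the
    tests above."""
    x = np.asarray(x)
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)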
| 352 |
"""simple docstring"""
def longest_distance(graph: dict[int, list[int]]) -> None:
    """Print the vertex count of a longest path in a DAG (Kahn's algorithm)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
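# Worked example: for the adjacency list above the printed value is 5, since
# a longest chain is 0 -> 3 -> 5 -> 6 -> 7, which touches five vertices.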
| 272 | 0 |
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print the decryption for every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
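# Worked example (illustrative): for the ROT13 ciphertext "GUVF VF N FRPERG",
# the loop in decrypt() prints "THIS IS A SECRET" at Key #13.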
| 38 |
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
a_ = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
a_ = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
a_ = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 152 | 0 |
"""simple docstring"""
import operator as op
def solve(post_fix):
    """Evaluate a space-separated postfix expression, printing each stack step."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ", )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
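# Worked example (illustrative): for the input "5 6 9 * +" the trace above
# pushes 5, 6, 9, reduces 6*9 -> 54, then 5+54 -> 59, so solve() returns 59.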
| 352 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check whether digit ``n`` may be placed at (row, column)."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the next empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Pretty-print a grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 301 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph stored as a dict of normalized edges."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge (or reset an existing one) with the given weight."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm and return a minimum spanning subgraph."""
        subgraph = Graph({min(self.vertices)}, {})

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Project Euler 107: saving gained by replacing the network with its MST."""
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
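# Tiny in-memory check of the MST logic (illustrative; ``solution()`` itself
# needs the Project Euler "p107_network.txt" data file next to this script):
#
#   g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#   assert sum(g.prims_algorithm().edges.values()) == 3   # keeps (0,1) and (1,2)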
| 278 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class snake_case :
def __init__( self , __UpperCAmelCase = "cpu" , __UpperCAmelCase = "openai/clip-vit-large-patch14") ->None:
a_ = device
a_ = CLIPTokenizerFast.from_pretrained(__UpperCAmelCase)
a_ = [0.48_145_466, 0.4_578_275, 0.40_821_073]
a_ = [0.26_862_954, 0.26_130_258, 0.27_577_711]
a_ = torchvision.transforms.Normalize(self.image_mean , self.image_std)
a_ = torchvision.transforms.Resize(2_24)
a_ = torchvision.transforms.CenterCrop(2_24)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->List[Any]:
a_ = self.resize(__UpperCAmelCase)
a_ = self.center_crop(__UpperCAmelCase)
a_ = self.normalize(__UpperCAmelCase)
return images
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase) ->Union[str, Any]:
a_ = self.tokenizer(text=__UpperCAmelCase , **__UpperCAmelCase)
a_ = self.preprocess_img(__UpperCAmelCase)
a_ = {key: value.to(self.device) for (key, value) in encoding.items()}
return encoding
class snake_case ( nn.Module ):
def __init__( self , __UpperCAmelCase=10 , __UpperCAmelCase=0.01 , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase="image" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , ) ->None:
super().__init__()
a_ = None
a_ = device if device else get_device()
if vqgan:
a_ = vqgan
else:
a_ = load_vqgan(self.device , conf_path=__UpperCAmelCase , ckpt_path=__UpperCAmelCase)
self.vqgan.eval()
if clip:
a_ = clip
else:
a_ = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
self.clip.to(self.device)
a_ = ProcessorGradientFlow(device=self.device)
a_ = iterations
a_ = lr
a_ = log
a_ = make_grid
a_ = return_val
a_ = quantize
a_ = self.vqgan.decoder.z_shape
def UpperCAmelCase__ ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=5 , __UpperCAmelCase=True) ->Any:
a_ = []
if output_path is None:
a_ = "./animation.gif"
if input_path is None:
a_ = self.save_path
a_ = sorted(glob(input_path + "/*"))
if not len(__UpperCAmelCase):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)")
if len(__UpperCAmelCase) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
a_ = total_duration / len(__UpperCAmelCase)
a_ = [frame_duration] * len(__UpperCAmelCase)
if extend_frames:
a_ = 1.5
a_ = 3
for file_name in paths:
if file_name.endswith(".png"):
images.append(imageio.imread(__UpperCAmelCase))
imageio.mimsave(__UpperCAmelCase , __UpperCAmelCase , duration=__UpperCAmelCase)
print(F'''gif saved to {output_path}''')
def UpperCAmelCase__ ( self , __UpperCAmelCase=None , __UpperCAmelCase=None) ->List[Any]:
if not (path or img):
raise ValueError("Input either path or tensor")
if img is not None:
raise NotImplementedError
a_ = preprocess(Image.open(__UpperCAmelCase) , target_image_size=2_56).to(self.device)
a_ = preprocess_vqgan(__UpperCAmelCase)
a_ , *a_ = self.vqgan.encode(__UpperCAmelCase)
return z
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Tuple:
a_ = self.latent.detach().requires_grad_()
a_ = base_latent + transform_vector
if self.quantize:
a_ , *a_ = self.vqgan.quantize(__UpperCAmelCase)
else:
a_ = trans_latent
return self.vqgan.decode(__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None) ->str:
a_ = self.clip_preprocessor(text=__UpperCAmelCase , images=__UpperCAmelCase , return_tensors="pt" , padding=__UpperCAmelCase)
a_ = self.clip(**__UpperCAmelCase)
a_ = clip_outputs.logits_per_image
if weights is not None:
a_ = similarity_logits * weights
return similarity_logits.sum()
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[int]:
a_ = self._get_clip_similarity(pos_prompts["prompts"] , __UpperCAmelCase , weights=(1 / pos_prompts["weights"]))
if neg_prompts:
a_ = self._get_clip_similarity(neg_prompts["prompts"] , __UpperCAmelCase , weights=neg_prompts["weights"])
else:
a_ = torch.tensor([1] , device=self.device)
a_ = -torch.log(__UpperCAmelCase) + torch.log(__UpperCAmelCase)
return loss
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
a_ = torch.randn_like(self.latent , requires_grad=__UpperCAmelCase , device=self.device)
a_ = torch.optim.Adam([vector] , lr=self.lr)
for i in range(self.iterations):
optim.zero_grad()
a_ = self._add_vector(__UpperCAmelCase)
a_ = loop_post_process(__UpperCAmelCase)
a_ = self._get_CLIP_loss(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
print("CLIP loss" , __UpperCAmelCase)
if self.log:
wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0])
else:
yield vector
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Tuple:
        wandb.init(reinit=True, project="face-editor")
wandb.config.update({"Positive Prompts": positive_prompts})
wandb.config.update({"Negative Prompts": negative_prompts})
wandb.config.update({"lr": self.lr, "iterations": self.iterations})
if image_path:
a_ = Image.open(__UpperCAmelCase)
a_ = image.resize((2_56, 2_56))
wandb.log("Original Image" , wandb.Image(__UpperCAmelCase))
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->List[str]:
if not prompts:
return []
a_ = []
a_ = []
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = [prompt.strip() for prompt in prompts.split("|")]
for prompt in prompts:
if isinstance(__UpperCAmelCase , (tuple, list)):
a_ = prompt[0]
a_ = float(prompt[1])
elif ":" in prompt:
a_ , a_ = prompt.split(":")
a_ = float(__UpperCAmelCase)
else:
a_ = prompt
a_ = 1.0
processed_prompts.append(__UpperCAmelCase)
weights.append(__UpperCAmelCase)
return {
"prompts": processed_prompts,
"weights": torch.tensor(__UpperCAmelCase , device=self.device),
}
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->List[Any]:
if image_path:
a_ = self._get_latent(__UpperCAmelCase)
else:
a_ = torch.randn(self.latent_dim , device=self.device)
if self.log:
self._init_logging(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
assert pos_prompts, "You must provide at least one positive prompt."
a_ = self.process_prompts(__UpperCAmelCase)
a_ = self.process_prompts(__UpperCAmelCase)
if save_final and save_path is None:
a_ = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"]))
if not os.path.exists(__UpperCAmelCase):
os.makedirs(__UpperCAmelCase)
else:
a_ = save_path + "_" + get_timestamp()
os.makedirs(__UpperCAmelCase)
a_ = save_path
a_ = self.vqgan.decode(self.latent)[0]
if show_intermediate:
print("Original Image")
show_pil(custom_to_pil(__UpperCAmelCase))
a_ = loop_post_process(__UpperCAmelCase)
for iter, transformed_img in enumerate(self._optimize_CLIP(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)):
if show_intermediate:
show_pil(__UpperCAmelCase)
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png'''))
if self.log:
wandb.log({"Image": wandb.Image(__UpperCAmelCase)})
if show_final:
show_pil(__UpperCAmelCase)
if save_final:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''')) | 243 | 0 |
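
# A standalone sketch (added for illustration) of the prompt convention accepted
# above: "|"-separated prompts with optional ":weight" suffixes. It mirrors the
# logic of process_prompts without the torch/device plumbing.
def parse_prompts(prompts: str) -> list[tuple[str, float]]:
    parsed = []
    for prompt in (p.strip() for p in prompts.split("|")):
        if ":" in prompt:
            text, weight = prompt.split(":")
            parsed.append((text, float(weight)))
        else:
            parsed.append((prompt, 1.0))
    return parsed


assert parse_prompts("a smiling face:2 | glasses") == [("a smiling face", 2.0), ("glasses", 1.0)]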
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : Any = {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6000, max_target_positions=1024, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1024, input_feat_per_channel=80, input_channels=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`.")

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs)
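

# A minimal instantiation sketch (added for illustration): the defaults describe
# a 12-encoder/6-decoder model with two convolutional subsampling layers.
if __name__ == "__main__":
    config = Speech2TextConfig()
    assert config.num_conv_layers == len(config.conv_kernel_sizes) == 2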
| 364 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """Build a (ksize x ksize) Gabor kernel for the given orientation and wavelength."""
    # the kernel size has to be odd so the filter has a true center pixel
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 91 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
def __lowercase ( self : str , **lowerCAmelCase : int ):
lowerCAmelCase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
"""sample_max_value""": 1.0,
"""algorithm_type""": """dpmsolver++""",
"""solver_type""": """midpoint""",
"""lambda_min_clipped""": -float("""inf""" ),
"""variance_type""": None,
}
config.update(**lowerCAmelCase )
return config
def __lowercase ( self : List[str] , lowerCAmelCase : List[Any]=0 , **lowerCAmelCase : int ):
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop("""num_inference_steps""" , lowerCAmelCase )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config(**lowerCAmelCase )
lowerCAmelCase = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase )
lowerCAmelCase = scheduler_class.from_pretrained(lowerCAmelCase )
new_scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase , lowerCAmelCase = sample, sample
for t in range(lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
lowerCAmelCase = new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowercase ( self : Union[str, Any] ):
pass
def __lowercase ( self : str , lowerCAmelCase : str=0 , **lowerCAmelCase : Any ):
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop("""num_inference_steps""" , lowerCAmelCase )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase )
lowerCAmelCase = scheduler_class.from_pretrained(lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
lowerCAmelCase = new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowercase ( self : Optional[int] , lowerCAmelCase : Any=None , **lowerCAmelCase : Optional[Any] ):
if scheduler is None:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**lowerCAmelCase )
lowerCAmelCase = scheduler_class(**lowerCAmelCase )
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**lowerCAmelCase )
lowerCAmelCase = scheduler_class(**lowerCAmelCase )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
lowerCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
return sample
def __lowercase ( self : Tuple ):
lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowerCAmelCase = 50
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
lowerCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
lowerCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1e-3
def __lowercase ( self : Dict ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase )
def __lowercase ( self : Any ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowerCAmelCase = self.full_loop(scheduler=lowerCAmelCase )
lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase = self.full_loop(scheduler=lowerCAmelCase )
lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def __lowercase ( self : List[str] ):
        self.check_over_configs(thresholding=False)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , algorithm_type="""dpmsolver++""" , solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , )
def __lowercase ( self : List[str] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase )
def __lowercase ( self : Union[str, Any] ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , prediction_type=lowerCAmelCase , algorithm_type=lowerCAmelCase , )
lowerCAmelCase = self.full_loop(
solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , prediction_type=lowerCAmelCase , algorithm_type=lowerCAmelCase , )
assert not torch.isnan(lowerCAmelCase ).any(), "Samples have nan numbers"
def __lowercase ( self : Union[str, Any] ):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
def __lowercase ( self : List[str] ):
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def __lowercase ( self : List[Any] ):
        self.check_over_configs(variance_type=None)
self.check_over_configs(variance_type="""learned_range""" )
def __lowercase ( self : int ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=lowerCAmelCase , time_step=0 )
def __lowercase ( self : int ):
lowerCAmelCase = self.full_loop()
lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def __lowercase ( self : str ):
        lowerCAmelCase = self.full_loop(use_karras_sigmas=True )
lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1e-3
def __lowercase ( self : Dict ):
lowerCAmelCase = self.full_loop(prediction_type="""v_prediction""" )
lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1e-3
def __lowercase ( self : Any ):
        lowerCAmelCase = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=True )
lowerCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1e-3
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = self.scheduler_classes[0]
        lowerCAmelCase = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
lowerCAmelCase = scheduler_class(**lowerCAmelCase )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
lowerCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
        assert sample.dtype == torch.float16
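

# A minimal usage sketch (added for illustration) of the scheduler exercised by
# the tests above, via the public diffusers API:
def _demo_scheduler() -> None:
    scheduler = DPMSolverSinglestepScheduler()
    scheduler.set_timesteps(num_inference_steps=25)
    print(scheduler.timesteps)  # descending training timesteps used for sampling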
| 155 |
"""simple docstring"""
def lowercase (snake_case__ : list[int] , snake_case__ : list[int] ) -> tuple[float, float]:
'''simple docstring'''
if not len(snake_case__ ) == len(snake_case__ ) == 3:
raise ValueError("""Please enter a valid equation.""" )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("""Both a & b of two equations can't be zero.""" )
# Extract the coefficients
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = equationa
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = equationa
# Calculate the determinants of the matrices
lowerCAmelCase = aa * ba - aa * ba
lowerCAmelCase = ca * ba - ca * ba
lowerCAmelCase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("""Infinite solutions. (Consistent system)""" )
else:
raise ValueError("""No solution. (Inconsistent system)""" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
lowerCAmelCase = determinant_x / determinant
lowerCAmelCase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
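

# A quick worked example (added for illustration): the system 2x + 3y = 7,
# x + y = 3 has determinant -1 and solves to x = 2, y = 1.
assert cramers_rule_2x2([2, 3, 7], [1, 1, 3]) == (2.0, 1.0)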
| 155 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
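# Usage note (added for illustration): the _LazyModule indirection above means
# heavy submodules are only imported on first attribute access, e.g.
#   from transformers.models.speech_to_text import Speech2TextProcessor
# triggers the real import of processing_speech_to_text at that point.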
| 355 |
'''simple docstring'''


def get_data(source_data: list[list[float]]) -> list[list[float]]:
    # Transpose the rows of raw data into per-attribute columns.
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    # Sum each attribute's per-row scores into one final score per row.
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
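

# A worked example (added for illustration): weight 0 means "smaller is better",
# weight 1 means "bigger is better"; each row gains a combined score in-place.
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))  # each row now ends with its score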
| 334 | 0 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
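

# Quick checks (added for illustration):
assert is_balanced("([]{})")
assert not is_balanced("[(])")
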
def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
| 149 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")
            issue.add_to_labels("stale")
if __name__ == "__main__":
main()
| 149 | 1 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
def _A ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__SCREAMING_SNAKE_CASE = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
__SCREAMING_SNAKE_CASE = dict(zip(_A , range(len(_A ) ) ) )
__SCREAMING_SNAKE_CASE = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__SCREAMING_SNAKE_CASE = {'unk_token': '[UNK]'}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
def _A ( self , **_A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'lower newer'
__SCREAMING_SNAKE_CASE = 'lower newer'
return input_text, output_text
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = 'lower newer'
__SCREAMING_SNAKE_CASE = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
__SCREAMING_SNAKE_CASE = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , _A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = tokenizer('Hello' , 'World' )
__SCREAMING_SNAKE_CASE = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] , _A )
@slow
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
__SCREAMING_SNAKE_CASE = tokenizer.encode('sequence builders' , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE = tokenizer.encode('multi-sequence build' , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE = tokenizer.encode(
'sequence builders' , add_special_tokens=_A , add_prefix_space=_A )
__SCREAMING_SNAKE_CASE = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_A , add_prefix_space=_A )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(_A )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained('microsoft/deberta-base' )
__SCREAMING_SNAKE_CASE = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
__SCREAMING_SNAKE_CASE = tokenizer(_A , padding=_A )
__SCREAMING_SNAKE_CASE = [tokenizer.decode(_A , skip_special_tokens=_A ) for seq in encoding['input_ids']]
# fmt: off
__SCREAMING_SNAKE_CASE = {
'input_ids': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__SCREAMING_SNAKE_CASE = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , _A )
for expected, decoded in zip(_A , _A ):
self.assertEqual(_A , _A )
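

# A minimal usage sketch (added for illustration) of the tokenizer under test:
#   tok = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
#   enc = tok("sequence builders")  # dict with input_ids / token_type_ids / attention_mask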
| 118 |
import os


def solution(filename: str = "input.txt") -> int:
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    columns = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(columns)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, columns):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
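

# Worked example (added for illustration): for the 2x2 grid [[1, 9], [5, 1]] the
# candidate left-to-right paths cost 10 (1->9), 7 (1->5->1), 6 (5->1) and
# 15 (5->1->9); the function above returns 6, the minimum over the last column.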
if __name__ == "__main__":
print(F'''{solution() = }''')
| 118 | 1 |
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 257 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ : List[Any] =None
lowerCAmelCase__ : Optional[Any] =logging.get_logger(__name__)
lowerCAmelCase__ : Optional[int] ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : int ={
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : int ={
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
lowerCAmelCase__ : Dict =['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , _A=None , _A=None , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=None , _A=None , _A=None , _A=False , **_A , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
__SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
vocab_file=_A , tokenizer_file=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , src_lang=_A , tgt_lang=_A , additional_special_tokens=_A , legacy_behaviour=_A , **_A , )
__SCREAMING_SNAKE_CASE = vocab_file
__SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
__SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
__SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(_A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else 'eng_Latn'
__SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
__SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _A ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _A ( self , _A , _A = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _A ( self , _A , _A = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _A ( self , _A , _A , _A , _A , **_A ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__SCREAMING_SNAKE_CASE = src_lang
__SCREAMING_SNAKE_CASE = self(_A , add_special_tokens=_A , return_tensors=_A , **_A )
__SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(_A )
__SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def _A ( self , _A , _A = "eng_Latn" , _A = None , _A = "fra_Latn" , **_A , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = src_lang
__SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(_A , _A , **_A )
def _A ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _A ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(_A )
if self.legacy_behaviour:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
__SCREAMING_SNAKE_CASE = [self.cur_lang_code]
__SCREAMING_SNAKE_CASE = [self.eos_token_id]
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
__SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(_A )
if self.legacy_behaviour:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
__SCREAMING_SNAKE_CASE = [self.cur_lang_code]
__SCREAMING_SNAKE_CASE = [self.eos_token_id]
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
__SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _A ( self , _A , _A = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__SCREAMING_SNAKE_CASE = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
return (out_vocab_file,)
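

# A minimal translation-prep sketch (added for illustration), using the public
# NLLB checkpoint named in the maps above:
#   tok = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
#   batch = tok("Hello world", return_tensors="pt")  # special tokens follow set_src_lang_special_tokens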
| 257 | 1 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
a__: Tuple = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
a__: Optional[int] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
a__: str = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}")
        return list(range(n_student))
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_student))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
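

# Quick sanity checks (added for illustration) against the hardcoded tables above:
assert pick_layers_to_copy(n_student=3, n_teacher=12) == [0, 6, 11]
assert get_layers_to_supervise(n_student=3, n_teacher=12) == [3, 7, 11]
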
def create_student_by_copying_alternating_layers(teacher: Union[str, PreTrainedModel], save_path: Union[str, Path] = "student", e: Union[int, None] = None, d: Union[int, None] = None, copy_first_teacher_layers=False, e_layers_to_copy=None, d_layers_to_copy=None, **extra_config_kwargs) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student by copying alternating layers from a teacher, and save it to save_path."""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}")
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}")
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
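# Example invocation via python-fire (added for illustration; flags map onto the
# parameters of create_student_by_copying_alternating_layers above):
#   python make_student.py facebook/bart-large-cnn student_dir --e 12 --d 3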
| 39 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name)

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power)
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        # Recreate the optimizer from its config with the WarmUp custom object.
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate")

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking)
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        # Whether to apply L2 weight decay to `param_name`.
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    # Distribution-aware gradient accumulation utility.
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        # Number of accumulated steps.
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        # The accumulated gradients on the current replica.
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        # Accumulates `gradients` on the current replica.
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        # Resets the accumulated gradients on the current replica.
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
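# --- Hedged usage sketch (illustrative only): accumulate gradients over several
# micro-batches, then apply one optimizer step. `loss_fn`, `dataset`, and
# `accumulation_steps` are assumed placeholders, not part of the original file.
def _gradient_accumulation_example(model, optimizer, loss_fn, dataset, accumulation_steps=4):
    accumulator = GradientAccumulator()
    for step, (x, y) in enumerate(dataset, start=1):
        with tf.GradientTape() as tape:
            # Scale the loss so the accumulated gradient matches a full-batch step.
            loss = loss_fn(y, model(x, training=True)) / accumulation_steps
        accumulator(tape.gradient(loss, model.trainable_variables))
        if step % accumulation_steps == 0:
            optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
            accumulator.reset()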
| 39 | 1 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def clamp_rect(rect, min, max):
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, low_res_scheduler: DDPMScheduler, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], max_noise_level: int = 350):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_inference_steps: int = 75, guidance_scale: float = 9.0, noise_level: int = 50, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, tile_size: int = 128, tile_border: int = 32, original_image_slice: int = 32):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image,
                    prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale,
                    noise_level=noise_level, negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 323 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            model_output = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            model_output = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
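# --- Hedged sketch (illustrative; derived only from the tests above): the
# scheduler accepts either a step count or an explicit, strictly descending
# timestep list, but not both at once.
def _set_timesteps_example():
    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
    scheduler.set_timesteps(num_inference_steps=10)  # evenly spaced schedule
    scheduler.set_timesteps(timesteps=[106, 0])      # custom schedule, must be descending
    return scheduler.timesteps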
| 323 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Any = ["MobileViTFeatureExtractor"]
lowerCAmelCase__ : Optional[int] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
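# --- Hedged note (illustrative, not part of the original file): with the
# `_LazyModule` pattern above, importing the package stays cheap; the heavy
# torch/TF submodules are only imported on first attribute access, e.g.:
#
#     from transformers.models import mobilevit
#     config = mobilevit.MobileViTConfig()       # configuration module loads here
#     model = mobilevit.MobileViTModel(config)   # modeling module (and torch) load here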
| 37 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)
    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")
    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 37 | 1 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameter: flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
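# --- Hedged example (illustrative, added): distance between two sample
# coordinate pairs, in meters. The coordinates are arbitrary test points.
def _lamberts_example():
    san_francisco = (37.774856, -122.424227)
    yosemite = (37.864742, -119.537521)
    # Roughly 2.5e5 meters for these two points.
    return lamberts_ellipsoidal_distance(*san_francisco, *yosemite)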
if __name__ == "__main__":
import doctest
doctest.testmod() | 232 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args ) | 232 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)
            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        inputs_args, ordered_input_names = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        inputs_args, ordered_input_names = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 359 |
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
| 164 | 0 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    # Yields successive Fibonacci numbers starting from 1.
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    # Returns the index of the first Fibonacci term to contain n digits.
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
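# --- Example (illustrative, added): the first Fibonacci term with three digits
# is 144, the 12th term, so solution(3) == 12.
def _solution_example():
    assert solution(3) == 12
    return solution(3)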
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 158 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Computes the most likely sequence of hidden states (the Viterbi path)."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
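# --- Hedged example (illustrative, added): the classic "Healthy/Fever" HMM from
# the Wikipedia Viterbi article. The most likely hidden-state path for the
# observations (normal, cold, dizzy) is ['Healthy', 'Healthy', 'Fever'].
def _viterbi_example():
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    return viterbi(observations, states, start_p, trans_p, emit_p)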
| 291 | 0 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Checks whether `n` can be placed at (row, column) without conflicts."""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Returns the coordinates of an empty cell, or ``None`` if the grid is full."""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solves the grid in place by backtracking; returns ``None`` if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 364 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 110 | 0 |
def fibonacci(n: int) -> int:
    # Returns the nth Fibonacci number (0-indexed with F(1) = 0, F(2) = 1 here).
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    # Returns the index of the first Fibonacci term with n digits.
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
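# --- Example (illustrative, added): fibonacci(12) == 144 is the first term with
# 3 digits, so fibonacci_digits_index(3) == 12; solution() answers Project
# Euler 25 (4782 for 1000 digits).
def _fibonacci_digits_example():
    assert fibonacci_digits_index(3) == 12
    return fibonacci_digits_index(3)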
| 31 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # An XLNet sequence has the format `A <sep> <cls>` or `A <sep> B <sep> <cls>`.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # XLNet uses segment id 2 for the trailing <cls> token.
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
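# --- Hedged example (illustrative, added): unlike BERT's leading [CLS], XLNet
# appends <sep> and <cls> at the END of the sequence, as the methods above show.
def _xlnet_special_tokens_example():
    tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
    ids = tokenizer.build_inputs_with_special_tokens([10, 11, 12])
    # -> [10, 11, 12, sep_token_id, cls_token_id]
    return ids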
| 291 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowerCamelCase = """hf-internal-testing/tiny-random-bert"""
lowerCamelCase = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
lowerCamelCase = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))
    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))
    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 241 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    """Configuration for RWKV models (checkpoints listed in the archive map above)."""

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Fall back to hidden_size-derived defaults when not given explicitly.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
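

# Usage sketch of the config above (illustrative; mirrors how a `transformers`
# PretrainedConfig subclass is typically exercised, nothing here is required by
# the class itself).
if __name__ == "__main__":
    config = RwkvConfig(num_hidden_layers=24, hidden_size=1024)
    assert config.intermediate_size == 4 * config.hidden_size  # derived default
    # attribute_map exposes `max_position_embeddings` as an alias of `context_length`
    assert config.max_position_embeddings == config.context_length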
| 241 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
A__ = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    """Image processor applying resize, center-crop, rescale and BGR channel flip."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image, size, resample=PIL.Image.BILINEAR, data_format=None, **kwargs):
        """Resize so the shortest edge matches size["shortest_edge"], keeping the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop an image to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs):
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def flip_channel_order(self, image, data_format=None):
        """Flip the channel order (RGB <-> BGR)."""
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_center_crop=None,
        crop_size=None,
        do_flip_channel_order=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transformations to one image or a batch of images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Turn model logits into per-image semantic segmentation maps, optionally resized."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
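

# Usage sketch of the processor above (illustrative: the class name reflects the
# assumption that this is the MobileViT image processor; any RGB image works).
if __name__ == "__main__":
    import numpy as np

    processor = MobileViTImageProcessor()
    image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    batch = processor.preprocess(image, return_tensors="np")
    # shortest edge -> 224, center crop -> 256x256, channel-first output expected
    print(batch["pixel_values"].shape)  # (1, 3, 256, 256)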
| 82 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer, backed by HuggingFace tokenizers."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """A RemBERT sequence (pair) has the format: [CLS] A [SEP] (B [SEP])."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token type ids are 0 for the first segment (and its specials) and 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
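

# Usage sketch of the fast tokenizer above (illustrative; requires the
# `google/rembert` checkpoint from the maps above to be reachable).
if __name__ == "__main__":
    tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
    encoded = tokenizer("first segment", "second segment")
    # [CLS] A [SEP] B [SEP], with token_type_ids 0 over A's span and 1 over B's,
    # exactly as create_token_type_ids_from_sequences above computes them.
    print(encoded["input_ids"], encoded["token_type_ids"])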
| 143 | 0 |
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized arithmetic expression of single digits
    using Dijkstra's two-stack algorithm."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: a closing parenthesis triggers one reduction
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num1, num2)
            operand_stack.push(total)

    # RULE 5: the final value on the operand stack is the answer
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
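    # Two more checks of the evaluator defined above. Both limitations follow from
    # the character-by-character loop: operands must be single digits, and every
    # operation must be fully parenthesized (only ")" triggers a reduction).
    assert dijkstras_two_stack_algorithm("((1 + 2) - 3)") == 0
    assert dijkstras_two_stack_algorithm("(8 / 4)") == 2.0  # op.truediv yields a float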
| 335 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
UpperCAmelCase__ = XCLIPTextConfig()
# derive patch size from model name
UpperCAmelCase__ = model_name.find('patch' )
UpperCAmelCase__ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
UpperCAmelCase__ = XCLIPVisionConfig(patch_size=snake_case__ , num_frames=snake_case__ )
if "large" in model_name:
UpperCAmelCase__ = 7_68
UpperCAmelCase__ = 30_72
UpperCAmelCase__ = 12
UpperCAmelCase__ = 10_24
UpperCAmelCase__ = 40_96
UpperCAmelCase__ = 16
UpperCAmelCase__ = 24
UpperCAmelCase__ = 7_68
UpperCAmelCase__ = 30_72
if model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ = 3_36
UpperCAmelCase__ = XCLIPConfig.from_text_vision_configs(snake_case__ , snake_case__ )
if "large" in model_name:
UpperCAmelCase__ = 7_68
return config
def rename_key(name):
# text encoder
if name == "token_embedding.weight":
UpperCAmelCase__ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
UpperCAmelCase__ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
UpperCAmelCase__ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
UpperCAmelCase__ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
UpperCAmelCase__ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
UpperCAmelCase__ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
UpperCAmelCase__ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase__ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
UpperCAmelCase__ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase__ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
UpperCAmelCase__ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
UpperCAmelCase__ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
UpperCAmelCase__ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
UpperCAmelCase__ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
UpperCAmelCase__ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
UpperCAmelCase__ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
UpperCAmelCase__ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase__ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
UpperCAmelCase__ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
UpperCAmelCase__ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
UpperCAmelCase__ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
UpperCAmelCase__ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def convert_state_dict(orig_state_dict, config):
for key in orig_state_dict.copy().keys():
UpperCAmelCase__ = orig_state_dict.pop(snake_case__ )
if "attn.in_proj" in key:
UpperCAmelCase__ = key.split('.' )
if key.startswith('visual' ):
UpperCAmelCase__ = key_split[3]
UpperCAmelCase__ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase__ = val[
:dim, :
]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[
-dim:, :
]
else:
UpperCAmelCase__ = val[
:dim
]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase__ = val[
:dim, :
]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[
-dim:, :
]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[-dim:]
elif key.startswith('mit' ):
UpperCAmelCase__ = key_split[2]
UpperCAmelCase__ = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[dim : dim * 2, :]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[dim : dim * 2]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = key_split[2]
UpperCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = rename_key(snake_case__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCAmelCase__ = val.T
UpperCAmelCase__ = val
return orig_state_dict
def prepare_video(num_frames):
if num_frames == 8:
UpperCAmelCase__ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
UpperCAmelCase__ = 'eating_spaghetti.npy'
elif num_frames == 32:
UpperCAmelCase__ = 'eating_spaghetti_32_frames.npy'
UpperCAmelCase__ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=snake_case__ , repo_type='dataset' , )
UpperCAmelCase__ = np.load(snake_case__ )
return list(snake_case__ )
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
UpperCAmelCase__ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
UpperCAmelCase__ = model_to_url[model_name]
UpperCAmelCase__ = 8
if "16-frames" in model_name:
UpperCAmelCase__ = 16
elif "shot" in model_name:
UpperCAmelCase__ = 32
UpperCAmelCase__ = get_xclip_config(snake_case__ , snake_case__ )
UpperCAmelCase__ = XCLIPModel(snake_case__ )
model.eval()
if "drive" in checkpoint_url:
UpperCAmelCase__ = 'pytorch_model.bin'
gdown.cached_download(snake_case__ , snake_case__ , quiet=snake_case__ )
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model']
else:
UpperCAmelCase__ = torch.hub.load_state_dict_from_url(snake_case__ )['model']
UpperCAmelCase__ = convert_state_dict(snake_case__ , snake_case__ )
UpperCAmelCase__ = XCLIPModel(snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ = model.load_state_dict(snake_case__ , strict=snake_case__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase__ = 3_36 if model_name == 'xclip-large-patch14-16-frames' else 2_24
UpperCAmelCase__ = VideoMAEImageProcessor(size=snake_case__ )
UpperCAmelCase__ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
UpperCAmelCase__ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
UpperCAmelCase__ = XCLIPProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
UpperCAmelCase__ = prepare_video(snake_case__ )
UpperCAmelCase__ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=snake_case__ , return_tensors='pt' , padding=snake_case__ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase__ = model(**snake_case__ )
# Verify outputs
UpperCAmelCase__ = outputs.logits_per_video
UpperCAmelCase__ = logits_per_video.softmax(dim=1 )
print('Probs:' , snake_case__ )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase__ = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase__ = torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase__ = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase__ = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase__ = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase__ = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase__ = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase__ = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase__ = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase__ = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase__ = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase__ = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase__ = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase__ = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase__ = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] )
else:
raise ValueError(f"Model name {model_name} not supported" )
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(snake_case__ , organization='nielsr' )
processor.push_to_hub(snake_case__ , organization='nielsr' )
slow_tokenizer.push_to_hub(snake_case__ , organization='nielsr' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_UpperCamelCase = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
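    # Example invocation of the converter above (paths and script filename are illustrative):
    #   python convert_x_clip_original_pytorch_to_hf.py \
    #       --model_name xclip-base-patch32 \
    #       --pytorch_dump_folder_path ./xclip-base-patch32 \
    #       --push_to_hub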
| 335 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # All arguments are recorded on self.config by @register_to_config.
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()) -> KarrasVeSchedulerState:
        """Build the descending timestep and sigma(t_i) schedule."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key) -> Tuple[jnp.ndarray, float]:
        """Explicit Langevin-like churn: raise sigma by gamma and add matching noise."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Propagate the sample one step with a first-order (Euler) estimate."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Second-order correction: average the Euler derivative with the corrected one."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
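

# Minimal usage sketch of the scheduler above (step count and shapes illustrative).
if __name__ == "__main__":
    scheduler = FlaxKarrasVeScheduler()
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=50)
    print(state.timesteps[:3], state.schedule[:3])  # descending t_i and sigma(t_i)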
| 196 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
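
# Note on the lazy pattern above (illustrative): no torch-backed symbol is
# imported until first attribute access, e.g.
#   from transformers.models.encodec import EncodecModel  # materializes lazily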
| 196 | 1 |
"""Mean absolute deviation of a dataset."""


def mean_absolute_deviation(nums: list) -> float:
    """Return the mean absolute deviation of the numbers in `nums`."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
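    # Quick numeric checks of the function above: for [1, 2, 3, 4] the mean is 2.5
    # and the absolute deviations are 1.5, 0.5, 0.5, 1.5, so the result is 1.0.
    assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0
    assert mean_absolute_deviation([7]) == 0.0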
| 367 |
"""Strand sort: https://en.wikipedia.org/wiki/Strand_sort"""

import operator


def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Repeatedly pull a sorted 'strand' out of the input and merge it into the solution."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 174 | 0 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Return the relative position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the KOQV parameters of (self-)attention, reshaped to 2D. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer norm parameters of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
"""simple docstring"""
UpperCamelCase :Dict = traverse_util.flatten_dict(variables["""target"""] )
UpperCamelCase :int = {"""/""".join(__magic_name__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCamelCase :str = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , __magic_name__ )
UpperCamelCase :Optional[int] = collections.OrderedDict()
# Shared embeddings.
UpperCamelCase :Dict = old["""token_embedder/embedding"""]
# Encoder.
for i in range(__magic_name__ ):
# Block i, layer 0 (Self Attention).
UpperCamelCase :Optional[int] = tax_layer_norm_lookup(__magic_name__ , __magic_name__ , """encoder""" , """pre_attention_layer_norm""" )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Optional[Any] = tax_attention_lookup(__magic_name__ , __magic_name__ , """encoder""" , """attention""" )
UpperCamelCase :Union[str, Any] = layer_norm
UpperCamelCase :Tuple = k.T
UpperCamelCase :Optional[Any] = o.T
UpperCamelCase :Dict = q.T
UpperCamelCase :List[str] = v.T
# Block i, layer 1 (MLP).
UpperCamelCase :Optional[Any] = tax_layer_norm_lookup(__magic_name__ , __magic_name__ , """encoder""" , """pre_mlp_layer_norm""" )
UpperCamelCase , UpperCamelCase :Dict = tax_mlp_lookup(__magic_name__ , __magic_name__ , """encoder""" , __magic_name__ )
UpperCamelCase :List[Any] = layer_norm
if split_mlp_wi:
UpperCamelCase :Any = wi[0].T
UpperCamelCase :Optional[Any] = wi[1].T
else:
UpperCamelCase :Optional[int] = wi.T
UpperCamelCase :Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCamelCase :Optional[Any] = tax_relpos_bias_lookup(
__magic_name__ , __magic_name__ , """encoder""" ).T
UpperCamelCase :Tuple = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
UpperCamelCase :Dict = tax_relpos_bias_lookup(
__magic_name__ , 0 , """encoder""" ).T
UpperCamelCase :Tuple = tax_relpos_bias_lookup(
__magic_name__ , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(__magic_name__ ):
# Block i, layer 0 (Self Attention).
UpperCamelCase :Dict = tax_layer_norm_lookup(__magic_name__ , __magic_name__ , """decoder""" , """pre_self_attention_layer_norm""" )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Dict = tax_attention_lookup(__magic_name__ , __magic_name__ , """decoder""" , """self_attention""" )
UpperCamelCase :int = layer_norm
UpperCamelCase :Optional[Any] = k.T
UpperCamelCase :Tuple = o.T
UpperCamelCase :Optional[Any] = q.T
UpperCamelCase :int = v.T
# Block i, layer 1 (Cross Attention).
UpperCamelCase :str = tax_layer_norm_lookup(__magic_name__ , __magic_name__ , """decoder""" , """pre_cross_attention_layer_norm""" )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Tuple = tax_attention_lookup(__magic_name__ , __magic_name__ , """decoder""" , """encoder_decoder_attention""" )
UpperCamelCase :str = layer_norm
UpperCamelCase :List[str] = k.T
UpperCamelCase :Optional[int] = o.T
UpperCamelCase :Union[str, Any] = q.T
UpperCamelCase :Tuple = v.T
# Block i, layer 2 (MLP).
UpperCamelCase :Optional[Any] = tax_layer_norm_lookup(__magic_name__ , __magic_name__ , """decoder""" , """pre_mlp_layer_norm""" )
UpperCamelCase , UpperCamelCase :Optional[int] = tax_mlp_lookup(__magic_name__ , __magic_name__ , """decoder""" , __magic_name__ )
UpperCamelCase :str = layer_norm
if split_mlp_wi:
UpperCamelCase :Any = wi[0].T
UpperCamelCase :int = wi[1].T
else:
UpperCamelCase :List[str] = wi.T
UpperCamelCase :Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCamelCase :Union[str, Any] = tax_relpos_bias_lookup(__magic_name__ , __magic_name__ , """decoder""" ).T
UpperCamelCase :int = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCamelCase :Optional[Any] = old["""decoder/logits_dense/kernel"""].T
return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepare a PyTorch state dict, duplicating the shared embedding where needed."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Load the T5X checkpoint at `tax_checkpoint_path` into the PyTorch `model`."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Convert a native T5X checkpoint into a PyTorch checkpoint."""
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
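    # Example invocation of the converter above (paths and script filename are illustrative):
    #   python convert_umt5_checkpoint_to_pytorch.py \
    #       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
    #       --config_file /path/to/config.json \
    #       --pytorch_dump_path ./umt5-converted \
    #       --scalable_attention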
| 38 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components(self):
torch.manual_seed(0 )
UpperCamelCase :Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__lowerCamelCase , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
UpperCamelCase :Tuple = EulerDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
UpperCamelCase :Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCamelCase :Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
UpperCamelCase :Any = CLIPTextModel(__lowerCamelCase )
UpperCamelCase :List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowerCamelCase )
UpperCamelCase :List[Any] = CLIPTextModelWithProjection(__lowerCamelCase )
UpperCamelCase :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def get_dummy_inputs(self, device, seed=0):
UpperCamelCase :Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
UpperCamelCase :List[str] = image / 2 + 0.5
if str(__lowerCamelCase ).startswith("""mps""" ):
UpperCamelCase :Any = torch.manual_seed(__lowerCamelCase )
else:
UpperCamelCase :List[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCamelCase :str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def test_stable_diffusion_xl_img2img_euler(self):
UpperCamelCase :List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase :Optional[Any] = self.get_dummy_components()
UpperCamelCase :List[Any] = StableDiffusionXLImgaImgPipeline(**__lowerCamelCase )
UpperCamelCase :Any = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Optional[Any] = self.get_dummy_inputs(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = sd_pipe(**__lowerCamelCase ).images
UpperCamelCase :Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase :List[Any] = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def test_attention_slicing_forward_pass(self):
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def test_save_load_optional_components(self):
pass
def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
UpperCamelCase :Union[str, Any] = self.get_dummy_components()
UpperCamelCase :Dict = StableDiffusionXLImgaImgPipeline(**__lowerCamelCase )
UpperCamelCase :List[Any] = sd_pipe.to(__lowerCamelCase )
UpperCamelCase :List[str] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
# forward without prompt embeds
UpperCamelCase :List[Any] = self.get_dummy_inputs(__lowerCamelCase )
UpperCamelCase :int = 3 * ["""this is a negative prompt"""]
UpperCamelCase :Union[str, Any] = negative_prompt
UpperCamelCase :Union[str, Any] = 3 * [inputs["""prompt"""]]
UpperCamelCase :Dict = sd_pipe(**__lowerCamelCase )
UpperCamelCase :Union[str, Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
UpperCamelCase :Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase )
UpperCamelCase :Optional[int] = 3 * ["""this is a negative prompt"""]
UpperCamelCase :Union[str, Any] = 3 * [inputs.pop("""prompt""" )]
prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = sd_pipe.encode_prompt(
__lowerCamelCase , negative_prompt=__lowerCamelCase )
UpperCamelCase :Dict = sd_pipe(
**__lowerCamelCase , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
UpperCamelCase :Union[str, Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class StableDiffusionXLImgaImgPipelineSlowTests(unittest.TestCase):
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
UpperCamelCase :Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCamelCase :Optional[Any] = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 64, 64) )
UpperCamelCase :Dict = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase )
UpperCamelCase :str = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def test_stable_diffusion(self):
UpperCamelCase :Any = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Optional[Any] = self.get_inputs(__lowerCamelCase )
UpperCamelCase :Optional[int] = pipe(**__lowerCamelCase ).images
UpperCamelCase :Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase :Union[str, Any] = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 38 | 1 |
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ :
def __init__( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Dict = 0
UpperCAmelCase : Any = 0
UpperCAmelCase : Tuple = {}
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
if vertex not in self.adjacency:
UpperCAmelCase : Tuple = {}
self.num_vertices += 1
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
self.add_vertex(_SCREAMING_SNAKE_CASE )
self.add_vertex(_SCREAMING_SNAKE_CASE )
if head == tail:
return
UpperCAmelCase : str = weight
UpperCAmelCase : Optional[Any] = weight
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.get_edges()
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = edge
edges.remove((tail, head, weight) )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase : Dict = list(edges[i] )
edges.sort(key=lambda _SCREAMING_SNAKE_CASE : e[2] )
for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
UpperCAmelCase : Union[str, Any] = edges[i][2] + 1
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = edge
UpperCAmelCase : List[Any] = weight
UpperCAmelCase : Any = weight
def __str__( self ) -> str:
'''simple docstring'''
UpperCAmelCase : int = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
UpperCAmelCase : int = self.adjacency[head][tail]
string += F"{head} -> {tail} == {weight}\n"
return string.rstrip("""\n""" )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[str] = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[Any] = Graph()
if vertices is None:
UpperCAmelCase : str = []
if edges is None:
UpperCAmelCase : int = []
for vertex in vertices:
g.add_vertex(_SCREAMING_SNAKE_CASE )
for edge in edges:
g.add_edge(*_SCREAMING_SNAKE_CASE )
return g
class SCREAMING_SNAKE_CASE__ :
def __init__( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : str = {}
UpperCAmelCase : List[str] = {}
def __len__( self ) -> int:
'''simple docstring'''
return len(self.parent )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if item in self.parent:
return self.find(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = item
UpperCAmelCase : Union[str, Any] = 0
return item
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
if item not in self.parent:
return self.make_set(_SCREAMING_SNAKE_CASE )
if item != self.parent[item]:
UpperCAmelCase : List[Any] = self.find(self.parent[item] )
return self.parent[item]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.find(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = self.find(_SCREAMING_SNAKE_CASE )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
UpperCAmelCase : Tuple = roota
return roota
if self.rank[roota] < self.rank[roota]:
UpperCAmelCase : Any = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
UpperCAmelCase : str = roota
return roota
return None
@staticmethod
def SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] = graph.num_vertices
UpperCAmelCase : List[str] = Graph.UnionFind()
UpperCAmelCase : str = []
while num_components > 1:
UpperCAmelCase : Optional[int] = {}
for vertex in graph.get_vertices():
UpperCAmelCase : Optional[int] = -1
UpperCAmelCase : Tuple = graph.get_edges()
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = edge
edges.remove((tail, head, weight) )
for edge in edges:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = edge
UpperCAmelCase : List[str] = union_find.find(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = union_find.find(_SCREAMING_SNAKE_CASE )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
UpperCAmelCase : Tuple = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
UpperCAmelCase : List[str] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = cheap_edge[vertex]
if union_find.find(_SCREAMING_SNAKE_CASE ) != union_find.find(_SCREAMING_SNAKE_CASE ):
union_find.union(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
mst_edges.append(cheap_edge[vertex] )
UpperCAmelCase : int = num_components - 1
UpperCAmelCase : List[str] = Graph.build(edges=_SCREAMING_SNAKE_CASE )
return mst
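

if __name__ == "__main__":
    # Small usage example of the classes above: a 4-cycle with one diagonal.
    g = Graph.build(edges=[(0, 1, 1), (1, 2, 2), (2, 3, 1), (3, 0, 2), (0, 2, 3)])
    g.distinct_weight()  # Boruvka assumes distinct edge weights
    mst = Graph.boruvka(g)
    print(mst)  # three edges connecting all four vertices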
| 76 |
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
A: Optional[Any] = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    """Load a TF2.x `layer_with_weights` checkpoint into a PyTorch BertModel."""
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
UpperCAmelCase : Union[str, Any] = getattr(UpperCamelCase , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
UpperCAmelCase : Dict = getattr(UpperCamelCase , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
UpperCAmelCase : Tuple = getattr(UpperCamelCase , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
UpperCAmelCase : Tuple = getattr(UpperCamelCase , """intermediate""" )
UpperCAmelCase : Any = getattr(UpperCamelCase , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
UpperCAmelCase : Dict = getattr(UpperCamelCase , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
UpperCAmelCase : Any = getattr(UpperCamelCase , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
UpperCAmelCase : List[str] = getattr(UpperCamelCase , """weight""" )
else:
logger.warning(F"Ignored {m_name}" )
# for certain layers reshape is necessary
UpperCAmelCase : List[str] = """.""".join(UpperCamelCase )
if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , UpperCamelCase ) or re.match(
R"""(\S+)\.attention\.output\.dense\.weight""" , UpperCamelCase ):
UpperCAmelCase : Dict = array.reshape(pointer.data.shape )
if "kernel" in full_name:
UpperCAmelCase : Dict = array.transpose()
if pointer.shape == array.shape:
UpperCAmelCase : Union[str, Any] = torch.from_numpy(UpperCamelCase )
else:
raise ValueError(
F"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
F" {array.shape}" )
logger.info(F"Successfully set variable {full_name} to PyTorch layer {trace}" )
return model
def convert_tfa_checkpoint_to_pytorch( tf_checkpoint_path , config_path , pytorch_dump_path ):
    # Instantiate model
    logger.info(F"Loading model based on config from {config_path}..." )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )
    # Load weights from checkpoint
    logger.info(F"Loading weights from checkpoint {tf_checkpoint_path}..." )
    load_tfa_weights_in_bert(model , tf_checkpoint_path , config )
    # Save pytorch-model
    logger.info(F"Saving PyTorch model to {pytorch_dump_path}..." )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
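# Example invocation (hypothetical script name and local paths):
#
#   python convert_bert_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_ckpt/bert_model.ckpt \
#       --bert_config_file ./tf2_ckpt/bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin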
| 76 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModel.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModel.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModelForPreTraining.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =TFAutoModelForCausalLM.from_pretrained(
lowerCAmelCase, output_loading_info=lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModelForCausalLM.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =AutoModelForCausalLM.from_pretrained(
lowerCAmelCase, output_loading_info=lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModelWithLMHead.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =TFAutoModelForMaskedLM.from_pretrained(
lowerCAmelCase, output_loading_info=lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModelForMaskedLM.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =AutoModelForMaskedLM.from_pretrained(
lowerCAmelCase, output_loading_info=lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =TFAutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase, output_loading_info=lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =AutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase, output_loading_info=lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase, from_tf=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
    def test_from_pretrained_identifier( self ):
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True )
        self.assertIsInstance(model, TFBertForMaskedLM )
        self.assertEqual(model.num_parameters(), 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ), 14_410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True )
        self.assertIsInstance(model, BertForMaskedLM )
        self.assertEqual(model.num_parameters(), 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ), 14_410 )
    def test_from_identifier_from_model_type( self ):
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True )
        self.assertIsInstance(model, TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters(), 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ), 14_410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True )
        self.assertIsInstance(model, RobertaForMaskedLM )
        self.assertEqual(model.num_parameters(), 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ), 14_410 )
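    # A hedged sketch of the PT<->TF round trip these tests exercise, using the
    # same "bert-base-uncased" checkpoint as above:
    #
    #   tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
    #   pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)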
| 75 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def __snake_case ( self : Optional[int]):
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def __snake_case ( self : Any):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [7, 4, 5, 10, 8, 9])
def __snake_case ( self : Dict):
pass
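    # The toy vocab above makes the expected ids easy to check by hand:
    # "un" -> 7, "##want" -> 4, "##ed" -> 5, "," -> 10, "runn" -> 8, "##ing" -> 9,
    # which is exactly the [7, 4, 5, 10, 8, 9] asserted in the test.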
| 40 | 0 |
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a string of Roman numerals to the integer it represents."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman numeral form of an integer."""
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Return the total characters saved by rewriting every numeral in the file minimally."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 42 |
import os
def solution(filename: str = "input.txt") -> int:
    """Find the minimal left-to-right path sum through the comma-separated matrix in ``filename``."""
    with open(os.path.join(os.path.dirname(__file__) , filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1 , rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
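# Worked example: for the 2x2 matrix [[1, 9], [5, 1]] a path may start anywhere
# in the left column, so the cheapest left-to-right path is 5 -> 1 with cost 6,
# and solution() would return 6 for a file containing the lines "1,9" and "5,1".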
if __name__ == "__main__":
print(f"""{solution() = }""")
| 42 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation from a UNet and a scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}." )
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process." )
        sample_size = int(sample_size)
        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample
        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
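    # A hedged usage sketch ("harmonai/maestro-150k" is an illustrative hub
    # checkpoint for this pipeline, not guaranteed here):
    #
    #   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    #   output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
    #   waveform = output.audios[0]   # numpy array, shape (channels, samples)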
| 64 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """Fast BARThez tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs by adding <s> ... </s> special tokens around one or two sequences."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """BARThez does not use token type ids; return a list of zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
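    # Minimal usage sketch (the "moussaKam/barthez" checkpoint is listed in the
    # pretrained map above):
    #
    #   tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
    #   tokenizer.build_inputs_with_special_tokens([10, 11])
    #   # -> [cls_token_id, 10, 11, sep_token_id]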
| 170 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        """simple docstring"""
        generator = TextaTextGenerationPipeline(model=model , tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test( self , generator , _ ):
        """simple docstring"""
        outputs = generator('Something there' )
        self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
snake_case_ : Dict = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'generated_text': ANY(_A )}, {'generated_text': ANY(_A )}],
[{'generated_text': ANY(_A )}, {'generated_text': ANY(_A )}],
] , )
snake_case_ : List[Any] = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'generated_text': ANY(_A )}, {'generated_text': ANY(_A )}],
[{'generated_text': ANY(_A )}, {'generated_text': ANY(_A )}],
] , )
with self.assertRaises(_A ):
generator(4 )
@require_torch
def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
        generator = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
snake_case_ : str = generator('Something there' , do_sample=_A )
self.assertEqual(_A , [{'generated_text': ''}] )
snake_case_ : Tuple = 3
snake_case_ : int = generator(
'Something there' , num_return_sequences=_A , num_beams=_A , )
snake_case_ : Optional[Any] = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(_A , _A )
snake_case_ : Dict = generator('This is a test' , do_sample=_A , num_return_sequences=2 , return_tensors=_A )
self.assertEqual(
_A , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
snake_case_ : List[Any] = generator.model.config.eos_token_id
snake_case_ : Optional[Any] = '<pad>'
snake_case_ : Dict = generator(
['This is a test', 'This is a second test'] , do_sample=_A , num_return_sequences=2 , batch_size=2 , return_tensors=_A , )
self.assertEqual(
_A , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
        generator = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
snake_case_ : Tuple = generator('Something there' , do_sample=_A )
self.assertEqual(_A , [{'generated_text': ''}] )
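    # Hedged usage sketch of the pipeline under test (same tiny checkpoint as
    # above; being random, its generated text is meaningless):
    #
    #   generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
    #   generator("Something there", do_sample=False)   # -> [{"generated_text": ...}]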
| 88 |
import argparse
from collections import defaultdict
import yaml
_SCREAMING_SNAKE_CASE = """docs/source/en/_toctree.yml"""
def clean_doc_toc(doc_list):
    """Remove duplicate entries from one section of the TOC and sort it alphabetically."""
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
    new_doc = sorted(new_doc , key=lambda s : s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError(f'{doc_list} has two \'overview\' docs which is not allowed.' )
    overview_doc.extend(new_doc )
    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False ):
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['sections']
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def check_pipeline_doc(overwrite=False ):
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
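# Typical invocations (hypothetical script name; the flag mirrors the argparse above):
#
#   python check_doc_toc.py                      # raise if _toctree.yml is unsorted
#   python check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place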
| 88 | 1 |
import itertools
import math
def is_prime(number: int) -> bool:
    '''Determine whether ``number`` is prime.'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    '''Generate the sequence of prime numbers.'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1


def solution(nth: int = 10_001 ) -> int:
    '''Return the nth prime number.'''
    return next(itertools.islice(prime_generator() , nth - 1 , None ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 30 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deit'] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deit'] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
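    # With the lazy module installed, the usual import pattern still works and is
    # only resolved on first attribute access, e.g.:
    #
    #   from transformers import DeiTConfig, DeiTModel
    #   model = DeiTModel(DeiTConfig())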
| 30 | 1 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    '''Sort the left and right halves individually, then merge them back into ``input_list``.'''
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list ) -> list:
    '''Return a sorted copy of the input list, using iterative (bottom-up) merge sort.'''
    if len(input_list ) <= 1:
        return input_list
    input_list = list(input_list )
    # iteration for two-way merging
    p = 2
    while p <= len(input_list ):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0 , len(input_list ) , p ):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list , low , mid , high )
        # final merge of last two parts
        if p * 2 >= len(input_list ):
            mid = i
            input_list = merge(input_list , 0 , mid , len(input_list ) - 1 )
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
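    # Quick checks (iter_merge_sort above returns a sorted copy):
    #
    #   iter_merge_sort([5, 9, 8, 7, 1, 2, 7])   # -> [1, 2, 5, 7, 7, 8, 9]
    #   iter_merge_sort([1])                     # -> [1]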
 | 20 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    '''Return the optimal minimax value for the player to move at ``node_index``.'''
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""" )
    if len(scores ) == 0:
        raise ValueError("""Scores cannot be empty""" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )


def main() -> None:
    '''simple docstring'''
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores ) , 2 )
    print("""Optimal value : """ , end="""""" )
    print(minimax(0 , 0 , True , scores , height ) )


if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
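# Worked check for the scores above: height = log2(8) = 3 and, with the
# maximizer moving first, the minimax value of [90, 23, 6, 33, 21, 65, 123, 34423]
# is 65, so main() prints "Optimal value : 65".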
| 20 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester ):
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , "embed_dim" ) )
        self.parent.assertTrue(hasattr(config , "num_heads" ) )
class TFCvtModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1e-12 , is_training=True , use_labels=True , num_labels=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
def __a ( self ):
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
# create a random int32 tensor of given shape
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ = self.get_config()
return config, pixel_values, labels
def __a ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __a ( self , a , a , a ):
UpperCamelCase__ = TFCvtModel(config=a )
UpperCamelCase__ = model(a , training=a )
UpperCamelCase__ = (self.image_size, self.image_size)
UpperCamelCase__ , UpperCamelCase__ = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCamelCase__ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCamelCase__ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __a ( self , a , a , a ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFCvtForImageClassification(a )
UpperCamelCase__ = model(a , labels=a , training=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self ):
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
def __a ( self ):
        self.model_tester = TFCvtModelTester(self )
        self.config_tester = TFCvtConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=37 )
def __a ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
def __a ( self ):
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def __a ( self ):
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def __a ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def __a ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def __a ( self ):
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
def __a ( self ):
UpperCamelCase__ = tf.keras.mixed_precision.Policy("mixed_float16" )
tf.keras.mixed_precision.set_global_policy(a )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
def __a ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(a )
UpperCamelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def __a ( self ):
def check_hidden_states_output(a , a , a ):
UpperCamelCase__ = model_class(a )
UpperCamelCase__ = model(**self._prepare_for_class(a , a ) )
UpperCamelCase__ = outputs.hidden_states
UpperCamelCase__ = len(self.model_tester.depth )
self.assertEqual(len(a ) , a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ = True
check_hidden_states_output(a , a , a )
def __a ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def __a ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def __a ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = TFCvtModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCamelCase ( ) -> Optional[int]:
'''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowercase_ ( unittest.TestCase ):
@cached_property
def __a ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __a ( self ):
UpperCamelCase__ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=a , return_tensors="tf" )
# forward pass
UpperCamelCase__ = model(**a )
# verify the logits
UpperCamelCase__ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , a )
UpperCamelCase__ = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , a , atol=1e-4 ) )
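    # Hedged inference sketch matching the integration test above
    # ("microsoft/cvt-13" is the first entry of the pretrained archive list):
    #
    #   processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
    #   model = TFCvtForImageClassification.from_pretrained("microsoft/cvt-13")
    #   inputs = processor(images=prepare_img(), return_tensors="tf")
    #   logits = model(**inputs).logits   # shape (1, 1000)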
| 80 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ) -> List[str]:
'''simple docstring'''
with open(__A ) as metadata_file:
UpperCamelCase__ = json.load(__A )
UpperCamelCase__ = LukeConfig(use_entity_aware_attention=__A , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
UpperCamelCase__ = torch.load(__A , map_location="cpu" )["module"]
# Load the entity vocab file
UpperCamelCase__ = load_original_entity_vocab(__A )
# add an entry for [MASK2]
UpperCamelCase__ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
UpperCamelCase__ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCamelCase__ = AddedToken("<ent>" , lstrip=__A , rstrip=__A )
UpperCamelCase__ = AddedToken("<ent2>" , lstrip=__A , rstrip=__A )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__A )
with open(os.path.join(__A , "tokenizer_config.json" ) , "r" ) as f:
UpperCamelCase__ = json.load(__A )
UpperCamelCase__ = "MLukeTokenizer"
with open(os.path.join(__A , "tokenizer_config.json" ) , "w" ) as f:
json.dump(__A , __A )
with open(os.path.join(__A , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(__A , __A )
UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
# Initialize the embeddings of the special tokens
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["@"] )[0]
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["#"] )[0]
UpperCamelCase__ = state_dict["embeddings.word_embeddings.weight"]
UpperCamelCase__ = word_emb[ent_init_index].unsqueeze(0 )
UpperCamelCase__ = word_emb[enta_init_index].unsqueeze(0 )
UpperCamelCase__ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
UpperCamelCase__ = state_dict[bias_name]
UpperCamelCase__ = decoder_bias[ent_init_index].unsqueeze(0 )
UpperCamelCase__ = decoder_bias[enta_init_index].unsqueeze(0 )
UpperCamelCase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCamelCase__ = F'''encoder.layer.{layer_index}.attention.self.'''
UpperCamelCase__ = state_dict[prefix + matrix_name]
UpperCamelCase__ = state_dict[prefix + matrix_name]
UpperCamelCase__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCamelCase__ = state_dict["entity_embeddings.entity_embeddings.weight"]
UpperCamelCase__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
UpperCamelCase__ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
UpperCamelCase__ = state_dict["entity_predictions.bias"]
UpperCamelCase__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
UpperCamelCase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
UpperCamelCase__ = LukeForMaskedLM(config=__A ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
UpperCamelCase__ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
UpperCamelCase__ = state_dict[key]
else:
UpperCamelCase__ = state_dict[key]
UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(__A , strict=__A )
if set(__A ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(__A ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A , task="entity_classification" )
UpperCamelCase__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
UpperCamelCase__ = (0, 9)
UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
UpperCamelCase__ = model(**__A )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase__ = torch.Size((1, 33, 768) )
UpperCamelCase__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase__ = torch.Size((1, 1, 768) )
UpperCamelCase__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
UpperCamelCase__ = "Tokyo is the capital of <mask>."
UpperCamelCase__ = (24, 30)
UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
UpperCamelCase__ = model(**__A )
UpperCamelCase__ = encoding["input_ids"][0].tolist()
UpperCamelCase__ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
UpperCamelCase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__A )
UpperCamelCase__ = outputs.entity_logits[0][0].argmax().item()
UpperCamelCase__ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__A ) )
model.save_pretrained(__A )
def load_original_entity_vocab( entity_vocab_path ):
    '''simple docstring'''
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = F'''{language}:{entity_name}'''
            new_mapping[new_entity_name] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
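# Example invocation (hypothetical script name and paths; the flags mirror the
# argparse above):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base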
| 80 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class UpperCamelCase_ (UpperCamelCase_ , UpperCamelCase_ ):
__magic_name__ = """maskformer-swin"""
__magic_name__ = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : str , lowerCAmelCase_ : Any=224 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : Union[str, Any]=96 , lowerCAmelCase_ : Dict=[2, 2, 6, 2] , lowerCAmelCase_ : List[str]=[3, 6, 12, 24] , lowerCAmelCase_ : Union[str, Any]=7 , lowerCAmelCase_ : Optional[int]=4.0 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : str="gelu" , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Union[str, Any]=0.0_2 , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=None , **lowerCAmelCase_ : Dict , ) -> Optional[Any]:
super().__init__(**_a )
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : Dict = patch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Any = embed_dim
UpperCAmelCase_ : Dict = depths
UpperCAmelCase_ : Tuple = len(_a )
UpperCAmelCase_ : Optional[int] = num_heads
UpperCAmelCase_ : Optional[int] = window_size
UpperCAmelCase_ : Any = mlp_ratio
UpperCAmelCase_ : Any = qkv_bias
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = drop_path_rate
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : Any = use_absolute_embeddings
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ : Dict = int(embed_dim * 2 ** (len(_a ) - 1) )
UpperCAmelCase_ : str = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(_a ) + 1 )]
UpperCAmelCase_ : str = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
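# A minimal usage sketch (argument values are illustrative, not from the
# original file):
#
#   config = MaskFormerSwinConfig(out_features=["stage1", "stage4"])
#   assert config.hidden_size == 96 * 2 ** 3  # channel dim after the last stage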
| 362 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    # No number with more than seven digits can equal the sum of the
    # factorials of its digits, so 7 * 9! + 1 is a safe upper bound.
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(f'{solution() = }')
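    # Hand-checked illustration: 145 = 1! + 4! + 5! is one of the numbers counted.
    assert sum_of_digit_factorial(145) == 145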
| 253 | 0 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 158 |
def circle_sort(collection: list) -> list:
    """Sort a list in place with the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
| 39 | 0 |
"""simple docstring"""
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(transposition)) for transposition in num_transpositions
    )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 32 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 32 | 1 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Index of the first Fibonacci number with ``n`` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
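    # Hand-checked illustration (not from the original file): solution(3) == 12,
    # because F(12) = 144 is the first three-digit Fibonacci number.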
    print(solution(int(str(input()).strip())))
| 6 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of ``nums`` with no two adjacent elements."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
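    # Illustrative check: picking 5, 7 and 6 (no two adjacent) gives the maximum.
    assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18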
| 205 | 0 |
'''simple docstring'''
def actual_power(a: int, b: int) -> int:
    # Exponentiation by squaring: O(log b) multiplications for b >= 0.
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
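    # Illustrative values: power(2, 10) == 1024 and power(-2, -3) == -0.125.
    assert power(2, 10) == 1024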
| 332 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode`, holding the (not yet quantized) latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int, ...] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18_215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
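# A minimal usage sketch (shapes and values are illustrative, not from the
# original module):
#
#   model = VQModel()
#   image = torch.randn(1, 3, 32, 32)
#   latents = model.encode(image).latents
#   reconstruction = model.decode(latents).sample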
| 332 | 1 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a square matrix of even order into four equal quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication with seven sub-products."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
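    # Hand-checked base-case illustration (not from the original file):
    # actual_strassen([[1, 2], [3, 4]], [[5, 6], [7, 8]]) returns
    # [[19, 22], [43, 50]], the ordinary 2x2 product.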
| 334 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load the original checkpoint's state dict onto CPU."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original weights into the VisualBERT structure."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
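    # Illustrative invocation (the script name is a placeholder; the checkpoint
    # name is one of ACCEPTABLE_CHECKPOINTS above):
    # python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visualbert-vqa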
| 334 | 1 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
A__ = 10
A__ = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
A__ = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(UpperCAmelCase_ ) ),
} , features=UpperCAmelCase_ , )
return dataset
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
A__ = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=UpperCAmelCase_ )
return filename
# FILE_CONTENT + files
SCREAMING_SNAKE_CASE = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
A__ = tmp_path_factory.mktemp("data" ) / "file.txt"
A__ = FILE_CONTENT
with open(UpperCAmelCase_ , "w" ) as f:
f.write(UpperCAmelCase_ )
return filename
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
import bza
A__ = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
A__ = bytes(UpperCAmelCase_ , "utf-8" )
with bza.open(UpperCAmelCase_ , "wb" ) as f:
f.write(UpperCAmelCase_ )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
import gzip
A__ = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
A__ = bytes(UpperCAmelCase_ , "utf-8" )
with gzip.open(UpperCAmelCase_ , "wb" ) as f:
f.write(UpperCAmelCase_ )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
A__ = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
A__ = bytes(UpperCAmelCase_ , "utf-8" )
with lza.frame.open(UpperCAmelCase_ , "wb" ) as f:
f.write(UpperCAmelCase_ )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Any:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
A__ = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with pyazr.SevenZipFile(UpperCAmelCase_ , "w" ) as archive:
archive.write(UpperCAmelCase_ , arcname=os.path.basename(UpperCAmelCase_ ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Any:
import tarfile
A__ = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(UpperCAmelCase_ , "w" ) as f:
f.add(UpperCAmelCase_ , arcname=os.path.basename(UpperCAmelCase_ ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
import lzma
A__ = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
A__ = bytes(UpperCAmelCase_ , "utf-8" )
with lzma.open(UpperCAmelCase_ , "wb" ) as f:
f.write(UpperCAmelCase_ )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple:
import zipfile
A__ = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(UpperCAmelCase_ , "w" ) as f:
f.write(UpperCAmelCase_ , arcname=os.path.basename(UpperCAmelCase_ ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
A__ = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
A__ = bytes(UpperCAmelCase_ , "utf-8" )
with zstd.open(UpperCAmelCase_ , "wb" ) as f:
f.write(UpperCAmelCase_ )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
A__ = tmp_path_factory.mktemp("data" ) / "file.xml"
A__ = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(UpperCAmelCase_ , "w" ) as f:
f.write(UpperCAmelCase_ )
return filename
SCREAMING_SNAKE_CASE = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
SCREAMING_SNAKE_CASE = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
SCREAMING_SNAKE_CASE = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
SCREAMING_SNAKE_CASE = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
SCREAMING_SNAKE_CASE = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
A__ = datasets.Dataset.from_dict(UpperCAmelCase_ )
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=UpperCAmelCase_ )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlitea.connect(UpperCAmelCase_ ) ) as con:
A__ = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(UpperCAmelCase_ , "w" , newline="" ) as f:
A__ = csv.DictWriter(UpperCAmelCase_ , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCAmelCase_ )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]:
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(UpperCAmelCase_ , "w" , newline="" ) as f:
A__ = csv.DictWriter(UpperCAmelCase_ , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCAmelCase_ )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> List[str]:
import bza
A__ = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(UpperCAmelCase_ , "rb" ) as f:
A__ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(UpperCAmelCase_ , "wb" ) as f:
f.write(UpperCAmelCase_ )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
A__ = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(UpperCAmelCase_ , "w" ) as f:
f.write(UpperCAmelCase_ , arcname=os.path.basename(UpperCAmelCase_ ) )
f.write(UpperCAmelCase_ , arcname=os.path.basename(UpperCAmelCase_ ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
A__ = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(UpperCAmelCase_ , "w" ) as f:
f.write(UpperCAmelCase_ , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(UpperCAmelCase_ , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> int:
A__ = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(UpperCAmelCase_ , "w" ) as f:
f.write(UpperCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(UpperCAmelCase_ ) ) )
f.write(UpperCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(UpperCAmelCase_ ) ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
A__ = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(UpperCAmelCase_ , "wb" ) as f:
A__ = pq.ParquetWriter(UpperCAmelCase_ , schema=UpperCAmelCase_ )
A__ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(UpperCAmelCase_ ) )] for k in DATA[0]} , schema=UpperCAmelCase_ )
writer.write_table(UpperCAmelCase_ )
writer.close()
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]:
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
A__ = {"data": DATA}
with open(UpperCAmelCase_ , "w" ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
A__ = {"data": DATA_DICT_OF_LISTS}
with open(UpperCAmelCase_ , "w" ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(UpperCAmelCase_ , "w" ) as f:
for item in DATA:
f.write(json.dumps(UpperCAmelCase_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(UpperCAmelCase_ , "w" ) as f:
for item in DATA:
f.write(json.dumps(UpperCAmelCase_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]:
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(UpperCAmelCase_ , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(UpperCAmelCase_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]:
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(UpperCAmelCase_ , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(UpperCAmelCase_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Union[str, Any]:
import gzip
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(UpperCAmelCase_ , "rb" ) as orig_file:
with gzip.open(UpperCAmelCase_ , "wb" ) as zipped_file:
zipped_file.writelines(UpperCAmelCase_ )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
import gzip
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(UpperCAmelCase_ , "rb" ) as orig_file:
with gzip.open(UpperCAmelCase_ , "wb" ) as zipped_file:
zipped_file.writelines(UpperCAmelCase_ )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
A__ = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(UpperCAmelCase_ , "w" ) as f:
f.write(UpperCAmelCase_ , arcname=os.path.basename(UpperCAmelCase_ ) )
f.write(UpperCAmelCase_ , arcname=os.path.basename(UpperCAmelCase_ ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any:
A__ = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(UpperCAmelCase_ , "w" ) as f:
f.write(UpperCAmelCase_ , arcname=os.path.join("nested" , os.path.basename(UpperCAmelCase_ ) ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str:
A__ = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(UpperCAmelCase_ , "w" ) as f:
f.write(UpperCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(UpperCAmelCase_ ) ) )
f.write(UpperCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(UpperCAmelCase_ ) ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
A__ = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(UpperCAmelCase_ , "w" ) as f:
f.add(UpperCAmelCase_ , arcname=os.path.basename(UpperCAmelCase_ ) )
f.add(UpperCAmelCase_ , arcname=os.path.basename(UpperCAmelCase_ ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
A__ = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(UpperCAmelCase_ , "w" ) as f:
f.add(UpperCAmelCase_ , arcname=os.path.join("nested" , os.path.basename(UpperCAmelCase_ ) ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]:
A__ = ["0", "1", "2", "3"]
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(UpperCAmelCase_ , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
A__ = ["0", "1", "2", "3"]
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(UpperCAmelCase_ , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[Any]:
A__ = ["0", "1", "2", "3"]
A__ = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(UpperCAmelCase_ , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
A__ = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(UpperCAmelCase_ , "w" ) as f:
f.write(UpperCAmelCase_ , arcname=os.path.basename(UpperCAmelCase_ ) )
f.write(UpperCAmelCase_ , arcname=os.path.basename(UpperCAmelCase_ ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
A__ = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(UpperCAmelCase_ , "w" ) as f:
f.write(UpperCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(UpperCAmelCase_ ) ) )
f.write(UpperCAmelCase_ , arcname=os.path.join("main_dir" , os.path.basename(UpperCAmelCase_ ) ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Dict:
A__ = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(UpperCAmelCase_ , "w" ) as f:
f.write(UpperCAmelCase_ , arcname=os.path.basename("unsupported.ext" ) )
f.write(UpperCAmelCase_ , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
A__ = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
A__ = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
f.write(UpperCAmelCase_ )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
A__ = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(UpperCAmelCase_ , "w" ) as f:
f.write(UpperCAmelCase_ , arcname=os.path.basename(UpperCAmelCase_ ) )
f.write(UpperCAmelCase_ , arcname=os.path.basename(UpperCAmelCase_ ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]:
A__ = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
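# A sketch of how a test might consume one of these fixtures (illustrative,
# not part of the original file):
#
#   def test_csv_loading(csv_path):
#       ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
#       assert ds.num_rows == 4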
| 353 |
"""simple docstring"""
SCREAMING_SNAKE_CASE = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
SCREAMING_SNAKE_CASE = [{"type": "code", "content": INSTALL_CONTENT}]
SCREAMING_SNAKE_CASE = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 230 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class CopyCheckTester(unittest.TestCase):
    """Tests for the `# Copied from` consistency checker."""

    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 180 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes strictly below ``max_number``."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid-integers p^q * q^p (p, q distinct primes) not exceeding base^degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 180 | 1 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 367 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix (unit diagonal on L)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
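    # Illustrative hand-checked example (not from the original file):
    # [[4, 3], [6, 3]] factors into L = [[1, 0], [1.5, 1]] and U = [[4, 3], [0, -1.5]].
    lower, upper = lower_upper_decomposition(np.array([[4, 3], [6, 3]]))
    assert np.allclose(lower @ upper, [[4, 3], [6, 3]])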
| 285 | 0 |
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __A = "" , __A = False ):
"""simple docstring"""
lowerCamelCase : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCamelCase : Tuple = is_leaf
lowerCamelCase : Any = prefix
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : int = 0
for q, w in zip(self.prefix , __A ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def _snake_case ( self , __A ):
"""simple docstring"""
for word in words:
self.insert(__A )
def _snake_case ( self , __A ):
"""simple docstring"""
if self.prefix == word:
lowerCamelCase : Optional[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowerCamelCase : Any = RadixNode(prefix=__A , is_leaf=__A )
else:
lowerCamelCase : List[Any] = self.nodes[word[0]]
lowerCamelCase , lowerCamelCase , lowerCamelCase : str = incoming_node.match(
__A )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(__A )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowerCamelCase : Union[str, Any] = remaining_prefix
lowerCamelCase : str = self.nodes[matching_string[0]]
lowerCamelCase : str = RadixNode(__A , __A )
lowerCamelCase : List[Any] = aux_node
if remaining_word == "":
lowerCamelCase : Tuple = True
else:
self.nodes[matching_string[0]].insert(__A )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : str = self.nodes.get(word[0] , __A )
if not incoming_node:
return False
else:
lowerCamelCase , lowerCamelCase , lowerCamelCase : str = incoming_node.match(
__A )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(__A )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : List[str] = self.nodes.get(word[0] , __A )
if not incoming_node:
return False
else:
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = incoming_node.match(
__A )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(__A )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCamelCase : str = list(self.nodes.values() )[0]
lowerCamelCase : Optional[Any] = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCamelCase : Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCamelCase : Optional[Any] = False
# If there is 1 edge, we merge it with its child
else:
lowerCamelCase : List[Any] = list(incoming_node.nodes.values() )[0]
lowerCamelCase : List[str] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCamelCase : List[str] = merging_node.nodes
return True
def _snake_case ( self , __A = 0 ):
"""simple docstring"""
if self.prefix != "":
print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
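# Hedged note (editor's addition): `incoming_node.match(word)` used above is
# the prefix-splitting helper defined earlier in this class. From its call
# sites, its contract is to return (matching_string, remaining_prefix,
# remaining_word); e.g. for a node prefix "banana" and query "band" it yields
# ("ban", "ana", "d"). A small illustrative demo, not part of the original:


def _demo_radix_lookup() -> None:
    root = RadixNode()
    root.insert_many(["band", "banana"])
    # "ban" exists only as an internal split node, so it is not a word.
    assert root.find("band") and not root.find("ban")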
| 283 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with the cocktail shaker (bidirectional bubble) sort."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # backward pass: pull the smallest remaining element toward the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # forward pass: push the largest remaining element toward the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
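# Usage note (editor's addition): like bubble sort this is O(n^2) worst case,
# but the alternating passes let "turtles" (small values stuck near the end)
# travel left quickly, and the `swapped` flag gives an O(n) best case on
# already-sorted input.


def _demo_cocktail_shaker_sort() -> None:
    assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
    assert cocktail_shaker_sort([]) == []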
| 283 | 1 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: comparing against the running interpreter
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if a version number or range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
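def _demo_require_version() -> None:
    # Hedged usage sketch (editor's addition; the requirement strings are just
    # examples, not pinned advice). Each call raises PackageNotFoundError when
    # the distribution is missing, ImportError when a clause is violated.
    require_version("numpy")                        # any installed version is fine
    require_version("tokenizers>=0.11.1,!=0.11.3")  # multi-clause range
    require_version("python>=3.7.0")                # special-cased: checks the interpreter
    require_version_core("datasets>=2.0.0")         # failure message adds the core hint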
| 101 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Graph vertex with a key (best edge weight seen) and pi (MST parent)."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear-scan extract-min; O(V^2 + E)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap, re-heapified after key updates."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
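def _demo_prim() -> None:
    # Illustrative demo (editor's addition): a weighted triangle whose MST
    # keeps the two cheapest edges. Note `connect` takes 1-based vertex
    # numbers, while the returned pairs are (vertex_id + 1, parent_id + 1).
    graph = [Vertex(i) for i in range(3)]  # ids "0", "1", "2"
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 1, 3, 4)
    assert prim(graph[:], graph[0]) == [(2, 1), (3, 2)]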
| 101 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 124 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sized square matrix into four equal quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication: 7 sub-products instead of 8."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    # NB: square inputs short-circuit here and the operand pair is returned
    # unmultiplied, as in the source layout; only the non-square path multiplies.
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same
    # and also power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
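def _demo_strassen() -> None:
    # Editor's cross-check (hedged: exercises only the non-square path, and
    # relies on the padding/trim logic above). A 2x3 @ 3x2 product verified
    # by hand: [[1,2,3],[4,5,6]] @ [[1,0],[0,1],[1,1]] = [[4,5],[10,11]].
    result = strassen([[1, 2, 3], [4, 5, 6]], [[1, 0], [0, 1], [1, 1]])
    assert result == [[4, 5], [10, 11]]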
| 124 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" RetriBERT tokenizer, backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its state disagrees with the arguments.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 37 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["Accelerate configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
| 37 | 1 |
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for n in [2, limit] with a sieve."""
    phi = [i - 1 for i in range(limit + 1)]  # phi[p] stays p - 1 until p is processed
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: reduce phi of all its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
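def _demo_solution() -> None:
    # Editor's cross-check: the sieve above yields exact totients (each prime p
    # applies phi[j] -= phi[j] // p, i.e. phi[j] *= (1 - 1/p)), so for small
    # limits it must agree with a gcd-counting brute force.
    from math import gcd

    def phi_naive(n: int) -> int:
        return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)

    assert solution(100) == sum(phi_naive(n) for n in range(2, 101))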
| 53 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_run_tpu_test(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
        """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 96 | 0 |
"""simple docstring"""
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Real (active) power P = S * cos(phi), with cos(phi) given as power_factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Reactive power Q = S * sin(phi) = S * sqrt(1 - power_factor**2)."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
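def _demo_power_triangle() -> None:
    # Worked example (editor's addition): 100 VA at power factor 0.8 is the
    # classic 3-4-5 power triangle: P = 80 W and Q = 100 * sqrt(1 - 0.64) = 60 VAR.
    assert real_power(100, 0.8) == 80.0
    assert round(reactive_power(100, 0.8), 9) == 60.0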
| 350 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def a__ ( snake_case__ ) -> Tuple:
if hor == 1_28:
lowerCamelCase = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase = (32, 1_28, 2_56)
lowerCamelCase = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
lowerCamelCase = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase = (32, 64, 1_28, 2_56)
lowerCamelCase = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
lowerCamelCase = torch.load(F'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
lowerCamelCase = model.state_dict()
lowerCamelCase = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_55_36,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
lowerCamelCase = UNetaDModel(**snake_case__ )
print(F'length of state dict: {len(state_dict.keys() )}' )
print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
lowerCamelCase = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase = state_dict.pop(snake_case__ )
hf_value_function.load_state_dict(snake_case__ )
torch.save(hf_value_function.state_dict() , F'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
with open(F'hub/hopper-medium-v2/unet/hor{hor}/config.json' , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
def a__ ( ) -> Optional[int]:
lowerCamelCase = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 1_28, 2_56),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_55_36,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
lowerCamelCase = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
lowerCamelCase = model
lowerCamelCase = UNetaDModel(**snake_case__ )
print(F'length of state dict: {len(state_dict.keys() )}' )
print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
lowerCamelCase = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase = state_dict.pop(snake_case__ )
hf_value_function.load_state_dict(snake_case__ )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 168 | 0 |
'''simple docstring'''
def lowercase_ ( _lowercase = 50 ) -> int:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
| 318 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 318 | 1 |
"""simple docstring"""
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort a list using one bucket per integer step of the value range."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
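def _demo_bucket_sort() -> None:
    # Editor's note: with one bucket per unit of the value range this behaves
    # like counting sort on integers, and duplicates are preserved.
    assert bucket_sort([5, 3, 3, 1]) == [1, 3, 3, 5]
    assert bucket_sort([0.4, 0.1, 0.3]) == [0.1, 0.3, 0.4]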
| 241 |
"""simple docstring"""
def create_ngram(sentence: str, ngram_size: int) -> list:
    """Create all n-grams (contiguous character windows) of the sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
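def _demo_create_ngram() -> None:
    # Illustrative usage (editor's addition): character-level bigrams.
    assert create_ngram("I am an NLPer", 2)[:4] == ["I ", " a", "am", "m "]
    assert create_ngram("abc", 3) == ["abc"]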
| 241 | 1 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version in MAJOR.MINOR.PATCH format."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Return the string version from a (major, minor, patch) tuple."""
    return ".".join(str(v) for v in version_tuple)
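def _demo_version() -> None:
    # Illustrative usage (editor's addition): strings are coerced through
    # _validate_operand, so mixed comparisons work.
    assert Version("1.0.0") < "2.0.0"
    assert Version("1.0.0") == Version("1.0.0")
    assert Version("1.0.0").tuple == (1, 0, 0)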
| 256 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase = logging.getLogger()
def lowercase ( ) -> List[str]:
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_UpperCamelCase = parser.parse_args()
return args.f
def lowercase ( a__ : List[Any] ) -> Optional[Any]:
_UpperCamelCase = {}
_UpperCamelCase = os.path.join(a__ , '''all_results.json''' )
if os.path.exists(a__ ):
with open(a__ , '''r''' ) as f:
_UpperCamelCase = json.load(a__ )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def lowercase ( ) -> str:
_UpperCamelCase = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( _lowercase):
@classmethod
def _UpperCamelCase ( cls : Any ) -> List[Any]:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def _UpperCamelCase ( cls : int ) -> str:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : str ) -> Dict:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : List[str] ) -> Tuple:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : List[str] ) -> str:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
_UpperCamelCase = 7 if get_gpu_count() > 1 else 2
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : Optional[Any] ) -> str:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : int ) -> Optional[Any]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : str ) -> str:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''translation_no_trainer''' ) ) )
@slow
def _UpperCamelCase ( self : Any ) -> List[Any]:
_UpperCamelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(__UpperCamelCase )
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.1_0 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''image_classification_no_trainer''' ) ) )
| 256 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    """Configuration for RetriBERT: the BERT hyperparameters plus the
    retrieval-specific `share_encoders` and `projection_dim`."""

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
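def _demo_retribert_config() -> None:
    # Illustrative instantiation (editor's addition); unspecified arguments
    # fall back to the defaults above.
    config = RetriBertConfig(projection_dim=256)
    assert config.model_type == "retribert"
    assert config.projection_dim == 256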
| 293 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 293 | 1 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or a parsed Version) against a requirement."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}')
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the currently installed PyTorch version against a reference version."""
    return compare_versions(torch_version, operation, version)
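def _demo_version_gates() -> None:
    # Hedged usage sketch (editor's addition): typical feature gating with the
    # helpers above; the version strings are examples only.
    if is_torch_version(">=", "1.12.0"):
        pass  # safe to rely on features introduced in torch 1.12
    if compare_versions("numpy", "<", "2.0.0"):
        pass  # pre-numpy-2 code path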
| 22 |
'''simple docstring'''
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a Python list."""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self):
        return self.size

    def is_empty(self):
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
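def _demo_circular_queue() -> None:
    # Illustrative usage (editor's addition): enqueue returns self, so calls chain.
    queue = CircularQueue(3)
    queue.enqueue(1).enqueue(2)
    assert queue.first() == 1
    assert queue.dequeue() == 1
    assert len(queue) == 1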
| 250 | 0 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCamelCase_ : Union[str, Any] = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase_ : Optional[Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCamelCase_ : Optional[int] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
lowerCamelCase_ : Any = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def _A ( lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
a =False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
a =True
# Deal with multi-line cases
elif (
re.search(
Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , lowercase , )
is not None
):
a =True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
a =True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
a =[
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
a =['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
a =True
if not attribute_used:
a =False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
a =True
elif attribute in ["tie_word_embeddings"] and default_value is False:
a =True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
a =True
elif attribute.endswith('''_token_id''' ):
a =True
# configuration class specific cases
if not case_allowed:
a =SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
a =allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def _A ( lowercase ):
"""simple docstring"""
a =dict(inspect.signature(config_class.__init__ ).parameters )
a =[x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
a =[signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
a ={}
if len(config_class.attribute_map ) > 0:
a ={v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
a =inspect.getsourcefile(lowercase )
a =os.path.dirname(lowercase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
a =[os.path.join(lowercase , lowercase ) for fn in os.listdir(lowercase ) if fn.startswith('''modeling_''' )]
# Get the source code strings
a =[]
for path in modeling_paths:
if os.path.isfile(lowercase ):
with open(lowercase ) as fp:
modeling_sources.append(fp.read() )
a =[]
for config_param, default_value in zip(lowercase , lowercase ):
# `attributes` here is all the variant names for `config_param`
a =[config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(lowercase , lowercase , lowercase , lowercase ):
unused_attributes.append(attributes[0] )
return sorted(lowercase )
def _A ( ):
"""simple docstring"""
a ={}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
a =[
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda lowercase : inspect.isclass(lowercase )
and issubclass(lowercase , lowercase )
and inspect.getmodule(lowercase ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
a =check_config_attributes_being_used(lowercase )
if len(lowercase ) > 0:
a =unused_attributes
if len(lowercase ) > 0:
a ='''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(lowercase )
if __name__ == "__main__":
    check_config_attributes()
| 215 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCamelCase_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 215 | 1 |
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_SCREAMING_SNAKE_CASE = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_SCREAMING_SNAKE_CASE = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""" ) , id="""X""" ),
} ) , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
# convert to numpy arrays
_A = np.array(lowerCAmelCase_ )
_A = np.array(lowerCAmelCase_ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("""Expected `X` to be a 2D vector""" )
if len(reference_distribution.shape ) != 2:
raise ValueError("""Expected `reference_distribution` to be a 2D vector""" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"""Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" )
# Get mahalanobis distance for each prediction
_A = X - np.mean(lowerCAmelCase_ )
_A = np.cov(reference_distribution.T )
try:
_A = np.linalg.inv(lowerCAmelCase_ )
except np.linalg.LinAlgError:
_A = np.linalg.pinv(lowerCAmelCase_ )
_A = np.dot(lowerCAmelCase_ , lowerCAmelCase_ )
_A = np.dot(lowerCAmelCase_ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 180 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :Tuple = JukeboxTokenizer
lowerCamelCase :str = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def UpperCAmelCase ( self ) -> Tuple:
import torch
_A = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
_A = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_A = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCAmelCase ( self ) -> List[str]:
import torch
_A = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
_A = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_A = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 180 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = False
lowerCAmelCase__ = 3.0
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} )
self.assertDictEqual(MockClass(a=2 , b=__SCREAMING_SNAKE_CASE ).to_kwargs() , {"""a""": 2, """b""": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} )
@require_cuda
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
AcceleratorState._reset_state()
__SCREAMING_SNAKE_CASE = Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__SCREAMING_SNAKE_CASE = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
self.assertEqual(scaler._enabled , __SCREAMING_SNAKE_CASE )
@require_multi_gpu
def UpperCAmelCase__ ( self : str ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=os.environ.copy() )
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
UpperCAmelCase : Any = Accelerator(kwargs_handlers=[ddp_scaler])
UpperCAmelCase : List[Any] = torch.nn.Linear(1_0_0, 2_0_0)
UpperCAmelCase : List[Any] = accelerator.prepare(model)
# Check the values changed in kwargs
UpperCAmelCase : Dict = ''
UpperCAmelCase : Optional[Any] = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
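# For reference, the `to_kwargs` behaviour tested above can be implemented by diffing
# an instance against a freshly-constructed default instance (a simplified sketch of
# the idea, not the actual accelerate source):
#
#     def to_kwargs(self):
#         default = self.__class__()
#         return {k: v for k, v in self.__dict__.items() if getattr(default, k) != v}
#
# which is why MockClass().to_kwargs() is {} while MockClass(a=2).to_kwargs() is {"a": 2}.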
| 331 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
UpperCAmelCase : Optional[int] = 'examples/'
UpperCAmelCase : List[str] = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
UpperCAmelCase : Union[str, Any] = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
UpperCAmelCase : Tuple = 'README.md'
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS[pattern]
__SCREAMING_SNAKE_CASE = replace.replace("""VERSION""" , a__ )
__SCREAMING_SNAKE_CASE = re_pattern.sub(a__ , a__ )
with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(a__ )
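# Worked example of one substitution (illustrative): updating to version "0.19.0"
# with the 'init' pattern rewrites the line
#     __version__ = "0.18.0.dev0"
# into
#     __version__ = "0.19.0"
# because the compiled regex matches the whole assignment line (re.MULTILINE) and
# the template's "VERSION" placeholder is filled in before substitution.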
def a__ ( a__ ):
"""simple docstring"""
for folder, directories, fnames in os.walk(a__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(a__ , a__ ) , a__ , pattern="""examples""" )
def a__ ( a__ , a__=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(a__ , a__ , a__ )
if not patch:
update_version_in_examples(a__ )
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """🤗 Transformers currently provides the following architectures"""
__SCREAMING_SNAKE_CASE = """1. Want to contribute a new model?"""
with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__SCREAMING_SNAKE_CASE = f.readlines()
# Find the start of the list.
__SCREAMING_SNAKE_CASE = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__SCREAMING_SNAKE_CASE = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__SCREAMING_SNAKE_CASE = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(a__ )
def a__ ( ):
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
__SCREAMING_SNAKE_CASE = REPLACE_PATTERNS["""init"""][0].search(a__ ).groups()[0]
return packaging.version.parse(a__ )
def a__ ( a__=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__SCREAMING_SNAKE_CASE = default_version.base_version
elif patch:
__SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
__SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
__SCREAMING_SNAKE_CASE = input(F'Which version are you releasing? [{default_version}]' )
if len(a__ ) == 0:
__SCREAMING_SNAKE_CASE = default_version
print(F'Updating version to {version}.' )
global_version_update(a__ , patch=a__ )
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_version()
__SCREAMING_SNAKE_CASE = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
__SCREAMING_SNAKE_CASE = current_version.base_version
# Check with the user we got that right.
__SCREAMING_SNAKE_CASE = input(F'Which version are we developing now? [{dev_version}]' )
if len(a__ ) == 0:
__SCREAMING_SNAKE_CASE = dev_version
print(F'Updating version to {version}.' )
global_version_update(a__ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
UpperCAmelCase : Dict = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 331 | 1 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if isinstance(lowerCamelCase__ , torch.Tensor ):
return image
elif isinstance(lowerCamelCase__ , PIL.Image.Image ):
lowerCamelCase_ = [image]
if isinstance(image[0] , PIL.Image.Image ):
lowerCamelCase_ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
lowerCamelCase_ = np.concatenate(lowerCamelCase__ , axis=0 )
lowerCamelCase_ = np.array(lowerCamelCase__ ).astype(np.floataa ) / 2_55.0
lowerCamelCase_ = image.transpose(0 , 3 , 1 , 2 )
lowerCamelCase_ = 2.0 * image - 1.0
lowerCamelCase_ = torch.from_numpy(lowerCamelCase__ )
elif isinstance(image[0] , torch.Tensor ):
lowerCamelCase_ = torch.cat(lowerCamelCase__ , dim=0 )
return image
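# Worked example of the scaling above (for clarity): a pixel value of 255 becomes
# 255 / 255.0 = 1.0 and then 2.0 * 1.0 - 1.0 = 1.0, while 0 becomes -1.0, so the
# VAE receives images normalized to the [-1, 1] range it expects.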
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=0.99_95 ):
if not isinstance(lowerCamelCase__ , np.ndarray ):
lowerCamelCase_ = True
lowerCamelCase_ = va.device
lowerCamelCase_ = va.cpu().numpy()
lowerCamelCase_ = va.cpu().numpy()
lowerCamelCase_ = np.sum(va * va / (np.linalg.norm(lowerCamelCase__ ) * np.linalg.norm(lowerCamelCase__ )) )
if np.abs(lowerCamelCase__ ) > DOT_THRESHOLD:
lowerCamelCase_ = (1 - t) * va + t * va
else:
lowerCamelCase_ = np.arccos(lowerCamelCase__ )
lowerCamelCase_ = np.sin(lowerCamelCase__ )
lowerCamelCase_ = theta_a * t
lowerCamelCase_ = np.sin(lowerCamelCase__ )
lowerCamelCase_ = np.sin(theta_a - theta_t ) / sin_theta_a
lowerCamelCase_ = sin_theta_t / sin_theta_a
lowerCamelCase_ = sa * va + sa * va
if inputs_are_torch:
lowerCamelCase_ = torch.from_numpy(lowerCamelCase__ ).to(lowerCamelCase__ )
return va
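# Numeric sanity check for the spherical interpolation above (a hand-worked example):
# slerp(0.5, [1, 0], [0, 1]) has dot = 0, so theta_0 = arccos(0) = pi/2 and
# theta_t = pi/4, giving s0 = s1 = sin(pi/4) / sin(pi/2) ≈ 0.7071; the result
# [0.7071, 0.7071] stays on the unit circle, whereas plain linear interpolation
# would shrink it to [0.5, 0.5].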
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = F.normalize(lowerCamelCase__ , dim=-1 )
lowerCamelCase_ = F.normalize(lowerCamelCase__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
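# i.e. after the normalization above this computes 2 * arcsin(|x - y| / 2) ** 2,
# which is half the squared geodesic (great-circle) angle between the two unit
# vectors, since the chord length |x - y| equals 2 * sin(theta / 2).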
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
for param in model.parameters():
lowerCamelCase_ = value
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
vae=lowercase , text_encoder=lowercase , clip_model=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , feature_extractor=lowercase , coca_model=lowercase , coca_tokenizer=lowercase , coca_transform=lowercase , )
lowerCamelCase_ = (
feature_extractor.size
if isinstance(feature_extractor.size , lowercase )
else feature_extractor.size["shortest_edge"]
)
lowerCamelCase_ = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , lowercase )
set_requires_grad(self.clip_model , lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Union[str, Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
self.enable_attention_slicing(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> int:
set_requires_grad(self.vae , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
set_requires_grad(self.vae , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
set_requires_grad(self.unet , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
set_requires_grad(self.unet , lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase ) -> List[Any]:
# get the original timestep using init_timestep
lowerCamelCase_ = min(int(num_inference_steps * strength ) , lowercase )
lowerCamelCase_ = max(num_inference_steps - init_timestep , 0 )
lowerCamelCase_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
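# Worked example (for clarity): with num_inference_steps=50 and strength=0.6,
# init_timestep = min(int(50 * 0.6), 50) = 30 and t_start = max(50 - 30, 0) = 20,
# so denoising skips the first 20 scheduler steps and runs the remaining 30.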
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None ) -> Any:
if not isinstance(lowercase , torch.Tensor ):
raise ValueError(f'`image` has to be of type `torch.Tensor` but is {type(lowercase )}' )
lowerCamelCase_ = image.to(device=lowercase , dtype=lowercase )
if isinstance(lowercase , lowercase ):
lowerCamelCase_ = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase )
]
lowerCamelCase_ = torch.cat(lowercase , dim=0 )
else:
lowerCamelCase_ = self.vae.encode(lowercase ).latent_dist.sample(lowercase )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCamelCase_ = 0.1_8_2_1_5 * init_latents
lowerCamelCase_ = init_latents.repeat_interleave(lowercase , dim=0 )
lowerCamelCase_ = randn_tensor(init_latents.shape , generator=lowercase , device=lowercase , dtype=lowercase )
# get latents
lowerCamelCase_ = self.scheduler.add_noise(lowercase , lowercase , lowercase )
lowerCamelCase_ = init_latents
return latents
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Tuple:
lowerCamelCase_ = self.coca_transform(lowercase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowerCamelCase_ = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
lowerCamelCase_ = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> str:
lowerCamelCase_ = self.feature_extractor.preprocess(lowercase )
lowerCamelCase_ = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
lowerCamelCase_ = self.clip_model.get_image_features(lowercase )
lowerCamelCase_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowercase )
lowerCamelCase_ = image_embeddings_clip.repeat_interleave(lowercase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[Any]:
lowerCamelCase_ = latents.detach().requires_grad_()
lowerCamelCase_ = self.scheduler.scale_model_input(lowercase , lowercase )
# predict the noise residual
lowerCamelCase_ = self.unet(lowercase , lowercase , encoder_hidden_states=lowercase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowerCamelCase_ = self.scheduler.alphas_cumprod[timestep]
lowerCamelCase_ = 1 - alpha_prod_t
# compute the predicted original sample from the predicted noise (also called
# "predicted x_0" in formula (12) of https://arxiv.org/pdf/2010.02502.pdf)
lowerCamelCase_ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowerCamelCase_ = torch.sqrt(lowercase )
lowerCamelCase_ = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , lowercase ):
lowerCamelCase_ = self.scheduler.sigmas[index]
lowerCamelCase_ = latents - sigma * noise_pred
else:
raise ValueError(f'scheduler type {type(self.scheduler )} not supported' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCamelCase_ = 1 / 0.1_8_2_1_5 * sample
lowerCamelCase_ = self.vae.decode(lowercase ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = transforms.Resize(self.feature_extractor_size )(lowercase )
lowerCamelCase_ = self.normalize(lowercase ).to(latents.dtype )
lowerCamelCase_ = self.clip_model.get_image_features(lowercase )
lowerCamelCase_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowercase )
lowerCamelCase_ = spherical_dist_loss(lowercase , lowercase ).mean() * clip_guidance_scale
lowerCamelCase_ = -torch.autograd.grad(lowercase , lowercase )[0]
if isinstance(self.scheduler , lowercase ):
lowerCamelCase_ = latents.detach() + grads * (sigma**2)
lowerCamelCase_ = noise_pred_original
else:
lowerCamelCase_ = noise_pred_original - torch.sqrt(lowercase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase = None , lowercase = None , lowercase = 512 , lowercase = 512 , lowercase = 0.6 , lowercase = 50 , lowercase = 7.5 , lowercase = 1 , lowercase = 0.0 , lowercase = 100 , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = 0.8 , lowercase = 0.1 , lowercase = 0.1 , ) -> int:
if isinstance(lowercase , lowercase ) and len(lowercase ) != batch_size:
raise ValueError(f'You have passed {batch_size} batch_size, but only {len(lowercase )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(lowercase , torch.Generator ) and batch_size > 1:
lowerCamelCase_ = [generator] + [None] * (batch_size - 1)
lowerCamelCase_ = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
lowerCamelCase_ = [x[0] for x in coca_is_none if x[1]]
lowerCamelCase_ = ", ".join(lowercase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowercase ):
raise ValueError(
f'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
f'Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.' )
lowerCamelCase_ = self.get_image_description(lowercase )
if style_prompt is None:
if len(lowercase ):
raise ValueError(
f'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
f' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.' )
lowerCamelCase_ = self.get_image_description(lowercase )
# get prompt text embeddings for content and style
lowerCamelCase_ = self.tokenizer(
lowercase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowercase , return_tensors="pt" , )
lowerCamelCase_ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ = self.tokenizer(
lowercase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowercase , return_tensors="pt" , )
lowerCamelCase_ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ = slerp(lowercase , lowercase , lowercase )
# duplicate text embeddings for each generation per prompt
lowerCamelCase_ = text_embeddings.repeat_interleave(lowercase , dim=0 )
# set timesteps
lowerCamelCase_ = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_offset:
lowerCamelCase_ = 1
self.scheduler.set_timesteps(lowercase , **lowercase )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
lowerCamelCase_ , lowerCamelCase_ = self.get_timesteps(lowercase , lowercase , self.device )
lowerCamelCase_ = timesteps[:1].repeat(lowercase )
# Preprocess image
lowerCamelCase_ = preprocess(lowercase , lowercase , lowercase )
lowerCamelCase_ = self.prepare_latents(
lowercase , lowercase , lowercase , text_embeddings.dtype , self.device , lowercase )
lowerCamelCase_ = preprocess(lowercase , lowercase , lowercase )
lowerCamelCase_ = self.prepare_latents(
lowercase , lowercase , lowercase , text_embeddings.dtype , self.device , lowercase )
lowerCamelCase_ = slerp(lowercase , lowercase , lowercase )
if clip_guidance_scale > 0:
lowerCamelCase_ = self.get_clip_image_embeddings(lowercase , lowercase )
lowerCamelCase_ = self.get_clip_image_embeddings(lowercase , lowercase )
lowerCamelCase_ = slerp(
lowercase , lowercase , lowercase )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = content_text_input.input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer([""] , padding="max_length" , max_length=lowercase , return_tensors="pt" )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowerCamelCase_ = uncond_embeddings.repeat_interleave(lowercase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated on the target device
# for 1-to-1 reproducibility of results with the CompVis implementation.
# However, this currently doesn't work on `mps`.
lowerCamelCase_ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowerCamelCase_ = torch.randn(lowercase , generator=lowercase , device="cpu" , dtype=lowercase ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(lowercase , generator=lowercase , device=self.device , dtype=lowercase )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
lowerCamelCase_ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
# check if the scheduler accepts generator
lowerCamelCase_ = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowerCamelCase_ = generator
with self.progress_bar(total=lowercase ):
for i, t in enumerate(lowercase ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(lowercase , lowercase )
# predict the noise residual
lowerCamelCase_ = self.unet(lowercase , lowercase , encoder_hidden_states=lowercase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
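# i.e. the classifier-free guidance update
#     eps = eps_uncond + guidance_scale * (eps_text - eps_uncond)
# so guidance_scale > 1 extrapolates past the unconditional prediction
# toward the text-conditioned one.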
# perform clip guidance
if clip_guidance_scale > 0:
lowerCamelCase_ = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowerCamelCase_ , lowerCamelCase_ = self.cond_fn(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCamelCase_ = 1 / 0.1_8_2_1_5 * latents
lowerCamelCase_ = self.vae.decode(lowercase ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(lowercase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowercase , nsfw_content_detected=lowercase )
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A ={
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
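# How the deferred import works (a simplified sketch of the idea, not the actual
# `_LazyModule` source): the package's module object is swapped for a proxy whose
# __getattr__ imports the right submodule only on first attribute access, e.g.
#
#     class LazyModule(types.ModuleType):
#         def __getattr__(self, name):
#             submodule = self._class_to_module[name]        # e.g. "modeling_bloom"
#             module = importlib.import_module("." + submodule, self.__name__)
#             return getattr(module, name)
#
# so heavy backends such as torch or tokenizers are only imported when actually used.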
| 19 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__snake_case =logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : int , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[str] ) -> None:
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 55 |
'''simple docstring'''
def mean_absolute_deviation( nums : list[int] ) -> float:
    if not nums: # Makes sure that the list is not empty
        raise ValueError('List is empty' )
    average = sum(nums ) / len(nums ) # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
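# Worked example (a hand check): for nums = [1, 2, 3, 4] the average is 10 / 4 = 2.5,
# the absolute deviations are 1.5, 0.5, 0.5, 1.5, and the mean absolute deviation is
# (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.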
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__a )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True} )
lowercase__ = Features({"text": Value("string" )} )
lowercase__ = Features({} )
lowercase__ = "text"
@property
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
return {self.text_column: "text"}
| 64 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 188 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def UpperCAmelCase ( a_ ) -> None:
"""simple docstring"""
__A , __A = analyze_text(a_ )
__A = list(" " + ascii_lowercase )
# total character count, used to normalize counts into probabilities.
__A = sum(single_char_strings.values() )
# entropy over single-character (length-1) strings
__A = 0
# for each letter of the alphabet, add its entropy contribution if it occurs in the text
for ch in my_alphas:
if ch in single_char_strings:
__A = single_char_strings[ch]
__A = my_str / all_sum
my_fir_sum += prob * math.loga(a_ ) # entropy formula.
# print entropy
print(F'''{round(-1 * my_fir_sum ):.1f}''' )
# entropy over two-character (length-2) strings
__A = sum(two_char_strings.values() )
__A = 0
# for each two-letter sequence, add its entropy contribution if it occurs in the text.
for cha in my_alphas:
for cha in my_alphas:
__A = cha + cha
if sequence in two_char_strings:
__A = two_char_strings[sequence]
__A = int(a_ ) / all_sum
my_sec_sum += prob * math.loga(a_ )
# print second entropy
print(F'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
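# Worked example (a hand check, assuming the log above is base 2): for the text "ab",
# analyze_text below counts {'a': 1, 'b': 1}, so each character has probability 0.5 and
#     H1 = -(0.5 * log2(0.5) + 0.5 * log2(0.5)) = 1.0 bit per character,
# which is the maximum for a two-symbol alphabet.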
def UpperCAmelCase ( a_ ) -> tuple[dict, dict]:
"""simple docstring"""
__A = Counter() # type: ignore
__A = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# special case: the leading space is paired with the first character.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(a_ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 352 |
def UpperCAmelCase ( ) -> list[list[int]]:
"""simple docstring"""
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
SCREAMING_SNAKE_CASE :List[str] = generate_large_matrix()
SCREAMING_SNAKE_CASE :str = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def UpperCAmelCase ( a_ ) -> None:
"""simple docstring"""
assert all(row == sorted(a_ , reverse=a_ ) for row in grid )
assert all(list(a_ ) == sorted(a_ , reverse=a_ ) for col in zip(*a_ ) )
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
__A = 0
__A = len(a_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__A = (left + right) // 2
__A = array[mid]
# num must be negative and the previous element non-negative, i.e. mid is the first negative index.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__A = mid + 1
else:
__A = mid - 1
# No negative numbers, so return the last index of the array + 1, which is its length.
return len(a_ )
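# Worked trace (for clarity): find_negative_index([4, 3, 2, -1]) on the
# reverse-sorted row proceeds as
#     left=0, right=3 -> mid=1, array[1]=3  >= 0 -> left=2
#     left=2, right=3 -> mid=2, array[2]=2  >= 0 -> left=3
#     left=3, right=3 -> mid=3, array[3]=-1 <  0 and array[2] >= 0 -> return 3
# so this row contributes len(array) - 3 = 1 negative number to the total.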
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
__A = 0
__A = len(grid[0] )
for i in range(len(a_ ) ):
__A = find_negative_index(grid[i][:bound] )
total += bound
return (len(a_ ) * len(grid[0] )) - total
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
return len([number for row in grid for number in row if number < 0] )
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
__A = 0
for row in grid:
for i, number in enumerate(a_ ):
if number < 0:
total += len(a_ ) - i
break
return total
def UpperCAmelCase ( ) -> None:
"""simple docstring"""
from timeit import timeit
print("Running benchmarks" )
__A = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__A = timeit(F'''{func}(grid=grid)''' , setup=a_ , number=5_0_0 )
print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 124 | 0 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a__ : Any = 'sshleifer/bart-tiny-random'
a__ : str = 'patrickvonplaten/t5-tiny-random'
@require_torch
class lowercase_ ( unittest.TestCase ):
@cached_property
def __a ( self ):
return AutoConfig.from_pretrained(a )
def __a ( self ):
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(a , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def __a ( self ):
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(a , tempfile.mkdtemp() , e=1 , d=a )
def __a ( self ):
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(a , tempfile.mkdtemp() , e=1 , d=a )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def __a ( self ):
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(a , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def __a ( self ):
with self.assertRaises(a ):
create_student_by_copying_alternating_layers(a , tempfile.mkdtemp() , e=a , d=a )
| 80 | """simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase = logging.getLogger()
def lowercase ( ) -> List[str]:
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_UpperCamelCase = parser.parse_args()
return args.f
def lowercase ( a__ : List[Any] ) -> Optional[Any]:
_UpperCamelCase = {}
_UpperCamelCase = os.path.join(a__ , '''all_results.json''' )
if os.path.exists(a__ ):
with open(a__ , '''r''' ) as f:
_UpperCamelCase = json.load(a__ )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def lowercase ( ) -> str:
_UpperCamelCase = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( _lowercase):
@classmethod
def _UpperCamelCase ( cls : Any ) -> List[Any]:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def _UpperCamelCase ( cls : int ) -> str:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : str ) -> Dict:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model and it would need drop_last to work.
return
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : List[str] ) -> Tuple:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : List[str] ) -> str:
# with so little data, distributed training needs more epochs to get the score on par with 0/1 GPU runs
_UpperCamelCase = 7 if get_gpu_count() > 1 else 2
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : Optional[Any] ) -> str:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : int ) -> Optional[Any]:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def _UpperCamelCase ( self : str ) -> str:
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = F'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , '''translation_no_trainer''' ) ) )
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        '''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        '''.split()
        if is_cuda_and_apex_available():
            testargs.append('''--fp16''' )
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''step_1''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''image_classification_no_trainer''' ) ) )
| 256 | 0 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 361 |
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence"),
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 108 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
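# Hypothetical concrete command, assuming the base class above; the name
# `HelloCommand` and the "hello" sub-command are illustrative only. `parser`
# is expected to be the sub-parsers action of the root ArgumentParser.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("Hello from the CLI!")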
| 107 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
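    # Numeric sanity checks (no plotting): the Bernstein weights at t=0.5 sum
    # to 1 and give the midpoint of the degree-2 curve above; values verified by hand.
    assert BezierCurve([(0, 0), (5, 5), (5, 0)]).basis_function(0.5) == [0.25, 0.5, 0.25]
    assert BezierCurve([(0, 0), (5, 5), (5, 0)]).bezier_curve_function(0.5) == (3.75, 2.5)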
| 107 | 1 |
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
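    # Quick sanity checks for the converter above (values verified by hand):
    assert snake_to_camel_case("some_random_string") == "someRandomString"
    assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"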
| 365 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [F"stage{idx}" for idx in range(1, len(depths) + 1)]
        self.out_features, self.out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
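# Quick sketch of the derived attributes under the default arguments (values
# follow directly from the formulas above):
#   MaskFormerSwinConfig().hidden_size  -> 768  (= 96 * 2**(4 - 1))
#   MaskFormerSwinConfig().stage_names  -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']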
| 126 | 0 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"""Difference between torch and flax is {diff} (>= {tol}).""")

    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 107 |
"""simple docstring"""
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError('''Input and labels should have the same size in the batch dimension.''')
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, '''keep_order''') and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
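# Minimal shape-check sketch (hypothetical sizes): a 1000-token vocabulary split
# into a 100-token head shortlist plus two tail clusters via cutoffs=[100, 500].
if __name__ == "__main__":
    crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500], div_val=2)
    hidden = torch.randn(2, 8, 64)            # (batch, seq_len, d_proj)
    labels = torch.randint(0, 1000, (2, 8))
    nll = crit(hidden, labels)                # one negative log-likelihood per shifted target token
    print(nll.shape)                          # torch.Size([14]) == 2 * (8 - 1)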
| 194 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__lowercase: Any = "\nHuman: <<task>>\n\nAssistant: "
__lowercase: Dict = "huggingface-tools/default-prompts"
__lowercase: Any = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name})
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 31 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f'{text_of_1_token} {text_of_1_token}'

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )

                text = f' {text}'

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )

    def test_log_warning(self):
        # Loading a tokenizer saved in an old format should raise with a pointer to the expected format.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 31 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='''image classification'''
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='''vision-to-text modeling''')

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
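# Minimal usage sketch of the auto classes above (checkpoint name illustrative):
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")   # resolved via FLAX_MODEL_MAPPING
#   qa = FlaxAutoModelForQuestionAnswering.from_pretrained("bert-base-cased")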
| 5 |
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
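    # Overlapping occurrences are reported as separate start positions:
    assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]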
| 93 | 0 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __a ( UpperCAmelCase ):
_a : Any = ['image_processor', 'tokenizer']
_a : List[str] = 'AutoImageProcessor'
_a : Tuple = 'AutoTokenizer'
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = kwargs.pop('feature_extractor' )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = kwargs.pop('images' , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = kwargs.pop('text' , _SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
_UpperCAmelCase = self.image_processor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None:
_UpperCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
_UpperCAmelCase = encodings['input_ids']
return inputs
def UpperCAmelCase__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@contextmanager
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer
yield
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ) -> Optional[Any]:
"""simple docstring"""
if added_vocab is None:
_UpperCAmelCase = self.tokenizer.get_added_vocab()
_UpperCAmelCase = {}
while tokens:
_UpperCAmelCase = re.search(R'<s_(.*?)>' , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if start_token is None:
break
_UpperCAmelCase = start_token.group(1 )
_UpperCAmelCase = re.search(Rf'''</s_{key}>''' , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
_UpperCAmelCase = start_token.group()
if end_token is None:
_UpperCAmelCase = tokens.replace(_SCREAMING_SNAKE_CASE , '' )
else:
_UpperCAmelCase = end_token.group()
_UpperCAmelCase = re.escape(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = re.escape(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''' , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if content is not None:
_UpperCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_UpperCAmelCase = self.tokenajson(_SCREAMING_SNAKE_CASE , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE )
if value:
if len(_SCREAMING_SNAKE_CASE ) == 1:
_UpperCAmelCase = value[0]
_UpperCAmelCase = value
else: # leaf nodes
_UpperCAmelCase = []
for leaf in content.split(R'<sep/>' ):
_UpperCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_UpperCAmelCase = leaf[1:-2] # for categorical special tokens
output[key].append(_SCREAMING_SNAKE_CASE )
if len(output[key] ) == 1:
_UpperCAmelCase = output[key][0]
_UpperCAmelCase = tokens[tokens.find(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor
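# A self-contained sketch (assumed name, simplified logic) of the tag-to-JSON
# conversion performed by the parsing method above: each `<s_key>...</s_key>`
# span becomes a dict entry, recursing into nested tags. It deliberately skips
# the `<sep/>` list handling and added-vocab logic of the real method.
def tags_to_json_sketch(tokens: str) -> dict:
    output = {}
    for match in re.finditer(r"<s_(.*?)>(.*?)</s_\1>", tokens, re.DOTALL):
        key, content = match.group(1), match.group(2).strip()
        inner = tags_to_json_sketch(content)  # recurse into nested spans
        output[key] = inner if inner else content
    return output


# tags_to_json_sketch("<s_menu><s_name>Latte</s_name><s_price>5</s_price></s_menu>")
# -> {"menu": {"name": "Latte", "price": "5"}}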
| 185 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ :Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Optional[Any] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :str = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :List[str] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :str = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :List[Any] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
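# A minimal sketch (assumed names, heavily simplified from the real `_LazyModule`)
# of what the structure above buys: submodules are imported on first attribute
# access rather than at package import time.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # resolved lazily, then cached so later lookups bypass __getattr__
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value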
| 185 | 1 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
lowercase_ = 'examples/'
lowercase_ = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
lowercase_ = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
lowercase_ = 'README.md'
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__A = f.read()
__A , __A = REPLACE_PATTERNS[pattern]
__A = replace.replace('''VERSION''' , __UpperCamelCase )
__A = re_pattern.sub(__UpperCamelCase , __UpperCamelCase )
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(__UpperCamelCase )
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern='''examples''' )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def lowerCAmelCase ( ):
"""simple docstring"""
__A = '''🤗 Transformers currently provides the following architectures'''
__A = '''1. Want to contribute a new model?'''
with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__A = f.readlines()
# Find the start of the list.
__A = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__A = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
__A = lines[index].replace(
'''https://huggingface.co/docs/diffusers/main/model_doc''' , '''https://huggingface.co/docs/diffusers/model_doc''' , )
index += 1
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__UpperCamelCase )
def lowerCAmelCase ( ):
"""simple docstring"""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
__A = f.read()
__A = REPLACE_PATTERNS['''init'''][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def lowerCAmelCase ( __UpperCamelCase=False ):
"""simple docstring"""
__A = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
__A = default_version.base_version
elif patch:
__A = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
__A = f'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
__A = input(f'Which version are you releasing? [{default_version}]' )
if len(__UpperCamelCase ) == 0:
__A = default_version
print(f'Updating version to {version}.' )
global_version_update(__UpperCamelCase , patch=__UpperCamelCase )
def lowerCAmelCase ( ):
"""simple docstring"""
__A = get_version()
__A = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
__A = current_version.base_version
# Check with the user we got that right.
__A = input(f'Which version are we developing now? [{dev_version}]' )
if len(__UpperCamelCase ) == 0:
__A = dev_version
print(f'Updating version to {version}.' )
global_version_update(__UpperCamelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
lowercase_ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
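# Worked illustration (assumed input string, not part of the script) of the
# "init" replacement pattern defined above:
#
# >>> re_pattern, template = REPLACE_PATTERNS["init"]
# >>> re_pattern.sub(template.replace("VERSION", "0.19.0"), '__version__ = "0.19.0.dev0"\n')
# '__version__ = "0.19.0"\n'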
| 266 |
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    """Split on punctuation, then on whitespace, yielding lists of words."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Concatenate every word, capitalized (PascalCase)."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the words of each fragment with `separator`, upper- or lower-cased."""
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__('doctest').testmod()
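# Assumed example outputs for the helpers above (inputs are illustrative):
# to_pascal_case("hello world")        -> "HelloWorld"
# to_camel_case("hello world")         -> "helloWorld"
# to_snake_case("hello world", False)  -> "hello_world"
# to_kebab_case("hello world", True)   -> "HELLO-WORLD"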
| 266 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase_ = logging.get_logger(__name__)
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
a : int = ["pixel_values"]
def __init__( self, __magic_name__ = True, __magic_name__ = None, __magic_name__ = 0.9, __magic_name__ = PILImageResampling.BICUBIC, __magic_name__ = True, __magic_name__ = None, __magic_name__ = 1 / 255, __magic_name__ = True, __magic_name__ = True, __magic_name__ = None, __magic_name__ = None, **__magic_name__, ) -> None:
"""simple docstring"""
super().__init__(**__magic_name__ )
UpperCamelCase__ : List[str] = size if size is not None else {'''shortest_edge''': 224}
UpperCamelCase__ : List[str] = get_size_dict(__magic_name__, default_to_square=__magic_name__ )
UpperCamelCase__ : List[str] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCamelCase__ : str = get_size_dict(__magic_name__, param_name='''crop_size''' )
UpperCamelCase__ : List[Any] = do_resize
UpperCamelCase__ : Dict = size
UpperCamelCase__ : List[str] = crop_pct
UpperCamelCase__ : List[str] = resample
UpperCamelCase__ : Optional[Any] = do_center_crop
UpperCamelCase__ : str = crop_size
UpperCamelCase__ : List[str] = do_rescale
UpperCamelCase__ : Any = rescale_factor
UpperCamelCase__ : int = do_normalize
UpperCamelCase__ : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCamelCase__ : List[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ = None, __magic_name__ = PILImageResampling.BICUBIC, __magic_name__ = None, **__magic_name__, ) -> np.ndarray:
"""simple docstring"""
UpperCamelCase__ : List[Any] = get_size_dict(__magic_name__, default_to_square=__magic_name__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
if crop_pct is not None:
if "shortest_edge" in size:
UpperCamelCase__ : List[Any] = int(size['''shortest_edge'''] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
UpperCamelCase__ : Optional[int] = int(size['''height'''] / crop_pct )
else:
UpperCamelCase__ : str = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct ))
else:
raise ValueError('''Invalid size for resize: {}'''.format(__magic_name__ ) )
UpperCamelCase__ : Any = get_resize_output_image_size(__magic_name__, size=__magic_name__, default_to_square=__magic_name__ )
else:
if "shortest_edge" in size:
UpperCamelCase__ : int = get_resize_output_image_size(__magic_name__, size=size['''shortest_edge'''], default_to_square=__magic_name__ )
elif "height" in size and "width" in size:
UpperCamelCase__ : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError('''Invalid size for resize: {}'''.format(__magic_name__ ) )
return resize(__magic_name__, size=__magic_name__, resample=__magic_name__, data_format=__magic_name__, **__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ = None, **__magic_name__, ) -> np.ndarray:
"""simple docstring"""
UpperCamelCase__ : Any = get_size_dict(__magic_name__ )
if "height" not in size or "width" not in size:
raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__magic_name__, size=(size['''height'''], size['''width''']), data_format=__magic_name__, **__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ = None, **__magic_name__, ) -> Optional[Any]:
"""simple docstring"""
return rescale(__magic_name__, scale=__magic_name__, data_format=__magic_name__, **__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__ = None, **__magic_name__, ) -> np.ndarray:
"""simple docstring"""
return normalize(__magic_name__, mean=__magic_name__, std=__magic_name__, data_format=__magic_name__, **__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = None, __magic_name__ = None, __magic_name__ = None, __magic_name__ = None, __magic_name__ = None, __magic_name__ = None, __magic_name__ = None, __magic_name__ = None, __magic_name__ = None, __magic_name__ = None, __magic_name__ = None, __magic_name__ = None, __magic_name__ = ChannelDimension.FIRST, **__magic_name__, ) -> PIL.Image.Image:
"""simple docstring"""
UpperCamelCase__ : Any = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ : List[Any] = crop_pct if crop_pct is not None else self.crop_pct
UpperCamelCase__ : Tuple = resample if resample is not None else self.resample
UpperCamelCase__ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ : Any = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ : Tuple = image_std if image_std is not None else self.image_std
UpperCamelCase__ : Tuple = size if size is not None else self.size
UpperCamelCase__ : str = get_size_dict(__magic_name__, default_to_square=__magic_name__ )
UpperCamelCase__ : Any = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ : str = get_size_dict(__magic_name__, param_name='''crop_size''' )
UpperCamelCase__ : Tuple = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_pct is None:
raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCamelCase__ : List[Any] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
UpperCamelCase__ : Tuple = [self.resize(image=__magic_name__, size=__magic_name__, crop_pct=__magic_name__, resample=__magic_name__ ) for image in images]
if do_center_crop:
UpperCamelCase__ : Optional[Any] = [self.center_crop(image=__magic_name__, size=__magic_name__ ) for image in images]
if do_rescale:
UpperCamelCase__ : Any = [self.rescale(image=__magic_name__, scale=__magic_name__ ) for image in images]
if do_normalize:
UpperCamelCase__ : List[str] = [self.normalize(image=__magic_name__, mean=__magic_name__, std=__magic_name__ ) for image in images]
UpperCamelCase__ : str = [to_channel_dimension_format(__magic_name__, __magic_name__ ) for image in images]
UpperCamelCase__ : List[str] = {'''pixel_values''': images}
return BatchFeature(data=__magic_name__, tensor_type=__magic_name__ )
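# Hedged usage sketch for the image processor above; constructor values and the
# input image are assumptions for illustration:
#
# processor = <this image processor class>(size={"shortest_edge": 224}, crop_pct=0.9)
# batch = processor(images=pil_image, return_tensors="pt")
# batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])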
| 247 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self, __magic_name__ = 16, __magic_name__ = 88, __magic_name__ = None, __magic_name__ = 1, __magic_name__ = 0.0, __magic_name__ = 32, __magic_name__ = None, __magic_name__ = False, __magic_name__ = None, __magic_name__ = None, __magic_name__ = "geglu", __magic_name__ = None, ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase__ : str = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__magic_name__, attention_head_dim=__magic_name__, in_channels=__magic_name__, num_layers=__magic_name__, dropout=__magic_name__, norm_num_groups=__magic_name__, cross_attention_dim=__magic_name__, attention_bias=__magic_name__, sample_size=__magic_name__, num_vector_embeds=__magic_name__, activation_fn=__magic_name__, num_embeds_ada_norm=__magic_name__, )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
UpperCamelCase__ : Any = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
UpperCamelCase__ : Optional[Any] = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
UpperCamelCase__ : int = [1, 0]
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__ = True, ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = hidden_states
UpperCamelCase__ : Tuple = []
UpperCamelCase__ : int = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
UpperCamelCase__ : List[Any] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
UpperCamelCase__ : List[str] = self.transformer_index_for_condition[i]
UpperCamelCase__ : List[str] = self.transformers[transformer_index](
__magic_name__, encoder_hidden_states=__magic_name__, timestep=__magic_name__, cross_attention_kwargs=__magic_name__, return_dict=__magic_name__, )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
UpperCamelCase__ : List[str] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
UpperCamelCase__ : int = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__magic_name__ )
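# Shape note (assumed sizes, for illustration): with condition_lengths == [77, 257],
# `encoder_hidden_states` is expected to be the dim=1 concatenation of a 77-token
# text embedding and a 257-token image embedding, e.g. torch.randn(2, 77 + 257, 768);
# each transformer sees only its slice, and the two residual outputs are blended
# by `mix_ratio`.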
| 247 | 1 |
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            # wrap around the alphabet
            num %= len(LETTERS)

            # preserve the case of the original symbol
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # advance (and wrap) the key position only on letters
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # non-letters pass through unchanged
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
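# Hedged round-trip sanity check (assumed key and message, not part of the
# original script): decryption must invert encryption, since case and
# non-letters are preserved and the key index advances identically both ways.
# assert decrypt_message("LION", encrypt_message("LION", "Attack at dawn")) == "Attack at dawn"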
| 276 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase = 256
class snake_case_ ( __A ):
__A : str = ["melgan"]
def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
super().__init__()
# From MELGAN
lowercase__ : List[Any] = math.log(1E-5 ) # Matches MelGAN training.
lowercase__ : str = 4.0 # Largest value for most examples
lowercase__ : Any = 1_28
self.register_modules(
notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]:
lowercase__ , lowercase__ : int = output_range
if clip:
lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value )
# Scale to [0, 1].
lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]:
lowercase__ , lowercase__ : Tuple = input_range
lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs
# Scale to [0, 1].
lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]:
lowercase__ : Optional[Any] = input_tokens > 0
lowercase__ , lowercase__ : int = self.notes_encoder(
encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ )
lowercase__ , lowercase__ : List[Any] = self.continuous_encoder(
encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple:
lowercase__ : Union[str, Any] = noise_time
if not torch.is_tensor(lowercase_ ):
lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0:
lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowercase__ : str = self.decoder(
encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ )
return logits
@torch.no_grad()
def __call__( self : List[str] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(lowercase_ )}.''' )
lowercase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase__ : Optional[int] = np.zeros([1, 0, self.n_dims] , np.floataa )
lowercase__ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
for i, encoder_input_tokens in enumerate(lowercase_ ):
if i == 0:
lowercase__ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase__ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase__ : str = ones
lowercase__ : str = self.scale_features(
lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ )
lowercase__ : str = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowercase__ : List[str] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(lowercase_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Optional[int] = self.decode(
encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
lowercase__ : Tuple = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] )
lowercase__ : List[str] = mel[:1]
lowercase__ : Optional[int] = mel.cpu().float().numpy()
lowercase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ )
logger.info("Generated segment" , lowercase_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase__ : Dict = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=lowercase_ )
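# Assumed usage sketch for the pipeline above (checkpoint name and token input
# are illustrative, not taken from this file):
#
# pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
# output = pipe(input_tokens=note_token_chunks, num_inference_steps=100, output_type="numpy")
# audio = output.audios[0]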
| 87 | 0 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ :
def __init__(self : Any , snake_case_ : List[str] , snake_case_ : Optional[int]=1_3 , snake_case_ : Dict=3 , snake_case_ : Dict=True , snake_case_ : Optional[Any]=True , snake_case_ : Any=0.1 , snake_case_ : Any=0.1 , snake_case_ : Union[str, Any]=2_2_4 , snake_case_ : List[str]=1_0_0_0 , snake_case_ : int=[3, 3, 6, 4] , snake_case_ : Union[str, Any]=[4_8, 5_6, 1_1_2, 2_2_0] , ):
__a : int = parent
__a : Tuple = batch_size
__a : Optional[Any] = num_channels
__a : Optional[int] = is_training
__a : Any = use_labels
__a : str = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : List[str] = num_labels
__a : Tuple = image_size
__a : Dict = layer_depths
__a : str = embed_dims
def lowerCAmelCase (self : List[Any] ):
__a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : List[Any] = None
if self.use_labels:
__a : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
__a : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase (self : Optional[int] ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=snake_case_ , layer_scale_init_value=1E-5 , )
def lowerCAmelCase (self : str , snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
__a : List[str] = SwiftFormerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : List[str] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCAmelCase (self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : str ):
__a : Tuple = self.num_labels
__a : Optional[int] = SwiftFormerForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Optional[Any] = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
__a : int = SwiftFormerForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Any = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase (self : List[Any] ):
(__a) : int = self.prepare_config_and_inputs()
__a : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __lowercase ,__lowercase ,unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : List[str] = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : Optional[int] = False
def lowerCAmelCase (self : Tuple ):
__a : int = SwiftFormerModelTester(self )
__a : Tuple = ConfigTester(
self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def lowerCAmelCase (self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def lowerCAmelCase (self : Any ):
pass
def lowerCAmelCase (self : int ):
__a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Tuple = model_class(snake_case_ )
__a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def lowerCAmelCase (self : Optional[int] ):
__a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[Any] = model_class(snake_case_ )
__a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Union[str, Any] = [*signature.parameters.keys()]
__a : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCAmelCase (self : Union[str, Any] ):
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase (self : int ):
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def lowerCAmelCase (self : Optional[Any] ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : str = SwiftFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def lowerCAmelCase (self : int ):
pass
def lowerCAmelCase (self : List[str] ):
def check_hidden_states_output(snake_case_ : List[str] , snake_case_ : str , snake_case_ : Dict ):
__a : Optional[int] = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__a : Any = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__a : Dict = outputs.hidden_states
__a : Optional[int] = 8
self.assertEqual(len(snake_case_ ) , snake_case_ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(snake_case_ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Optional[int] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Dict = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def lowerCAmelCase (self : Union[str, Any] ):
def _config_zero_init(snake_case_ : List[str] ):
__a : List[Any] = copy.deepcopy(snake_case_ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(snake_case_ , snake_case_ , 1E-10 )
if isinstance(getattr(snake_case_ , snake_case_ , snake_case_ ) , snake_case_ ):
__a : Optional[Any] = _config_zero_init(getattr(snake_case_ , snake_case_ ) )
setattr(snake_case_ , snake_case_ , snake_case_ )
return configs_no_init
__a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[Any] = _config_zero_init(snake_case_ )
for model_class in self.all_model_classes:
__a : Any = model_class(config=snake_case_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase (self : List[Any] ):
pass
def __UpperCamelCase ( ):
__a : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase (self : Union[str, Any] ):
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def lowerCAmelCase (self : Optional[Any] ):
__a : Dict = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(snake_case_ )
__a : List[Any] = self.default_image_processor
__a : Dict = prepare_img()
__a : Union[str, Any] = image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
# forward pass
with torch.no_grad():
__a : List[Any] = model(**snake_case_ )
# verify the logits
__a : Tuple = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
__a : Any = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
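# Assumed invocation for the suite above (file path is illustrative):
# python -m pytest tests/models/swiftformer/test_modeling_swiftformer.py -q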
| 363 |
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in `s` at which `pattern` begins (naive scan)."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 90 | 0 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger()
@dataclass
class lowerCamelCase_ :
'''simple docstring'''
__UpperCamelCase: nn.Module
__UpperCamelCase: List[nn.Module] = field(default_factory=snake_case__ )
__UpperCamelCase: list = field(default_factory=snake_case__ )
def _A ( self : List[str] , A : Optional[int] , A : Tensor , A : Tensor ):
_UpperCAmelCase : Union[str, Any] = len(list(m.modules() ) ) == 1 or isinstance(A , nn.Convad ) or isinstance(A , nn.BatchNormad )
if has_not_submodules:
self.traced.append(A )
def __call__( self : Any , A : Tensor ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A )
[x.remove() for x in self.handles]
return self
@property
def _A ( self : List[Any] ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda A : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowerCamelCase_ :
'''simple docstring'''
__UpperCamelCase: nn.Module
__UpperCamelCase: nn.Module
__UpperCamelCase: int = 0
__UpperCamelCase: List = field(default_factory=snake_case__ )
__UpperCamelCase: List = field(default_factory=snake_case__ )
def __call__( self : Optional[Any] , A : Tensor ):
_UpperCAmelCase : Optional[Any] = Tracker(self.dest )(A ).parametrized
_UpperCAmelCase : List[Any] = Tracker(self.src )(A ).parametrized
_UpperCAmelCase : str = list(filter(lambda A : type(A ) not in self.src_skip , A ) )
_UpperCAmelCase : int = list(filter(lambda A : type(A ) not in self.dest_skip , A ) )
if len(A ) != len(A ):
raise Exception(
F"""Numbers of operations are different. Source module has {len(A )} operations while"""
F""" destination module has {len(A )}.""" )
for dest_m, src_m in zip(A , A ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"""Transfered from={src_m} to={dest_m}""" )
def UpperCamelCase_ ( _UpperCAmelCase : str , _UpperCAmelCase : ResNetConfig , _UpperCAmelCase : Path , _UpperCAmelCase : bool = True ) -> str:
"""simple docstring"""
print(F"""Converting {name}...""" )
with torch.no_grad():
_UpperCAmelCase : Any = timm.create_model(_UpperCAmelCase , pretrained=_UpperCAmelCase ).eval()
_UpperCAmelCase : Union[str, Any] = ResNetForImageClassification(_UpperCAmelCase ).eval()
_UpperCAmelCase : Optional[int] = ModuleTransfer(src=_UpperCAmelCase , dest=_UpperCAmelCase )
_UpperCAmelCase : List[Any] = torch.randn((1, 3, 224, 224) )
module_transfer(_UpperCAmelCase )
assert torch.allclose(from_model(_UpperCAmelCase ) , our_model(_UpperCAmelCase ).logits ), "The model logits don't match the original one."
_UpperCAmelCase : Tuple = F"""resnet{'-'.join(name.split('resnet' ) )}"""
print(_UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=_UpperCAmelCase , )
# we can use the convnext one
_UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=_UpperCAmelCase , )
print(F"""Pushed {checkpoint_name}""" )
def UpperCamelCase_ ( _UpperCAmelCase : Path , _UpperCAmelCase : str = None , _UpperCAmelCase : bool = True ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Dict = "imagenet-1k-id2label.json"
_UpperCAmelCase : Optional[int] = 1_000
_UpperCAmelCase : Optional[int] = (1, num_labels)
_UpperCAmelCase : Union[str, Any] = "huggingface/label-files"
_UpperCAmelCase : int = num_labels
_UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase : Optional[int] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
_UpperCAmelCase : str = idalabel
_UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
_UpperCAmelCase : Union[str, Any] = partial(_UpperCAmelCase , num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase )
_UpperCAmelCase : Optional[int] = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(_UpperCAmelCase , names_to_config[model_name] , _UpperCAmelCase , _UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
__SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
__SCREAMING_SNAKE_CASE : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
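# Assumed invocation (script name and output path are illustrative; note that
# --push_to_hub defaults to True above):
#
# python convert_resnet_to_pytorch.py --model_name resnet50 \
#     --pytorch_dump_folder_path ./converted-resnet50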
| 31 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = """▁"""
__SCREAMING_SNAKE_CASE : str = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__SCREAMING_SNAKE_CASE : int = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
__SCREAMING_SNAKE_CASE : str = {
"""google/pegasus-xsum""": 512,
}
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = VOCAB_FILES_NAMES
__UpperCamelCase: Dict = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase: List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase: Optional[int] = PegasusTokenizer
__UpperCamelCase: Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self : Dict , A : List[str]=None , A : Union[str, Any]=None , A : Optional[int]="<pad>" , A : Tuple="</s>" , A : Union[str, Any]="<unk>" , A : Union[str, Any]="<mask_2>" , A : Dict="<mask_1>" , A : Union[str, Any]=None , A : int=103 , **A : Optional[Any] , ):
_UpperCAmelCase : Dict = offset
if additional_special_tokens is not None:
if not isinstance(A , A ):
raise TypeError(
F"""additional_special_tokens should be of type {type(A )}, but is"""
F""" {type(A )}""" )
_UpperCAmelCase : Optional[int] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(A ) , self.offset - 1 )
]
if len(set(A ) ) != len(A ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
_UpperCAmelCase : Any = additional_special_tokens_extended
else:
_UpperCAmelCase : Dict = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
A , tokenizer_file=A , pad_token=A , eos_token=A , unk_token=A , mask_token=A , mask_token_sent=A , offset=A , additional_special_tokens=A , **A , )
_UpperCAmelCase : Optional[Any] = vocab_file
_UpperCAmelCase : Optional[Any] = False if not self.vocab_file else True
def _A ( self : List[str] , A : Optional[Any] ):
_UpperCAmelCase : Any = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
F""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def _A ( self : str , A : List , A : Optional[List] = None , A : bool = False ):
if already_has_special_tokens:
return self._special_token_mask(A )
elif token_ids_a is None:
return self._special_token_mask(A ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _A ( self : Optional[int] , A : Union[str, Any] , A : int=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _A ( self : Union[str, Any] , A : str , A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 31 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 353 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_A = logging.getLogger(__name__)
class _lowerCamelCase ( a_ ):
def __init__( self : List[Any] , UpperCamelCase : Dict=-1 ) -> List[Any]:
"""simple docstring"""
# in NER datasets, the last column is usually reserved for NER label
lowerCAmelCase__ : Optional[int] = label_idx
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : int = mode.value
lowerCAmelCase__ : Dict = os.path.join(UpperCamelCase , f"""{mode}.txt""" )
lowerCAmelCase__ : str = 1
lowerCAmelCase__ : List[Any] = []
with open(UpperCamelCase , encoding="""utf-8""" ) as f:
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Any = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCamelCase , labels=UpperCamelCase ) )
guid_index += 1
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Tuple = []
else:
lowerCAmelCase__ : Optional[int] = line.split(""" """ )
words.append(splits[0] )
if len(UpperCamelCase ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCamelCase , labels=UpperCamelCase ) )
return examples
def _lowerCAmelCase ( self : Any , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCamelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCAmelCase__ : Union[str, Any] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCamelCase )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def _lowerCAmelCase ( self : str , UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(UpperCamelCase , """r""" ) as f:
lowerCAmelCase__ : Any = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ : List[str] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _lowerCamelCase ( a_ ):
def __init__( self : Union[str, Any] ) -> Any:
"""simple docstring"""
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(UpperCamelCase , """r""" ) as f:
lowerCAmelCase__ : Any = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ : str = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class _lowerCamelCasePOS ( TokenClassificationTask ):
    # distinct name: the source reused the same class name three times
    def read_examples_from_file( self : Union[str, Any] , data_dir : Any , mode : Union[Split, str] ) -> List[InputExample]:
        """simple docstring"""
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f"{mode}.txt" )
        guid_index = 1
        examples = []
        with open(file_path , encoding="utf-8" ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"] )
                    labels.append(token["upos"] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=words , labels=labels ) )
                    guid_index += 1
        return examples
    def write_predictions_to_file( self : List[str] , writer : TextIO , test_input_reader : TextIO , preds_list : List ) -> None:
        """simple docstring"""
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
            out += "\n"
            writer.write(out )
            example_id += 1
    def get_labels( self : Dict , path : str ) -> List[str]:
        """simple docstring"""
        if path:
            with open(path , "r" ) as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
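# Illustrative usage sketch (added; the data path and split are placeholders,
# not part of the original snippet):
#
#     task = _lowerCamelCase()                        # NER task, label in last column
#     examples = task.read_examples_from_file("./data", Split.train)
#     print(task.get_labels(None))                    # falls back to the default label set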
| 212 | 0 |
'''simple docstring'''
import math
def __snake_case ( num : int ):
    return math.sqrt(num ) * math.sqrt(num ) == num
def __snake_case_binary ( num : int ):  # suffixed so it no longer shadows the function above
    left = 0
    right = num
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == num:
            return True
        elif mid**2 > num:
            right = mid - 1
        else:
            left = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
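    # Illustrative sanity checks (added): both implementations should agree.
    assert __snake_case(16 ) and __snake_case_binary(16 )
    assert not __snake_case_binary(15 )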
| 55 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class snake_case ( ModelMixin ):
"""simple docstring"""
    def __init__( self , controlnets ):
        """simple docstring"""
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def forward( self , sample , timestep , encoder_hidden_states , controlnet_cond , conditioning_scale , class_labels = None , timestep_cond = None , attention_mask = None , cross_attention_kwargs = None , guess_mode = False , return_dict = True , ):
        """simple docstring"""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples , mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
            # merge samples
            if i == 0:
                down_block_res_samples , mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self , save_directory , is_main_process = True , save_function = None , safe_serialization = False , variant = None , ):
        """simple docstring"""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + f'''_{idx}'''
    @classmethod
    def from_pretrained( cls , pretrained_model_path , **kwargs ):
        """simple docstring"""
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + f'''_{idx}'''
        logger.info(f'''{len(controlnets )} controlnets loaded from {pretrained_model_path}.''' )
        if len(controlnets ) == 0:
            raise ValueError(
                f'''No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + "_0"}.''' )
        return cls(controlnets )
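# Minimal usage sketch (added; checkpoint paths are placeholders - this class mirrors
# diffusers' MultiControlNetModel, so two ControlNets share one forward pass):
#
#     nets = [ControlNetModel.from_pretrained(p) for p in ("./controlnet", "./controlnet_1")]
#     multi = snake_case(nets)
#     down_res, mid_res = multi(sample, t, text_emb, [cond_a, cond_b], [1.0, 0.5])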
| 55 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ' , '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg")
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
| 368 |
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
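# Illustrative check (added): selecting the middle index yields the median.
if __name__ == "__main__":
    data = [2, 7, 4, 1, 9, 3]
    assert quick_select(data, len(data) // 2) == sorted(data)[len(data) // 2]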
| 247 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 168 |
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ) -> None:
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
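# Example invocation (added; the script name and paths are hypothetical):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./funnel/model.ckpt \
#     --config_file ./funnel/config.json \
#     --pytorch_dump_path ./funnel/pytorch_model.bin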
| 177 | 0 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self , parent , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFDistilBertModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_distilbert_for_masked_lm(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFDistilBertForMaskedLM(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_distilbert_for_question_answering(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFDistilBertForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_distilbert_for_sequence_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_distilbert_for_multiple_choice(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_distilbert_for_token_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def setUp(self ):
        """simple docstring"""
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
    def test_config(self ):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_distilbert_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )

    def test_for_masked_lm(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )

    def test_for_question_answering(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )

    def test_for_multiple_choice(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )

    def test_for_token_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        """simple docstring"""
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDistilBertModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_masked_lm(self ):
        """simple docstring"""
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
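# To run only the integration check above (added; the test path is illustrative):
#   pytest tests/models/distilbert/test_modeling_tf_distilbert.py -k "test_inference_masked_lm"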
| 143 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
    'tokenizer_file': {
        'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}
class PegasusTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , **kwargs , ):
        """simple docstring"""
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    f"""additional_special_tokens should be of type {type(list )}, but is"""
                    f""" {type(additional_special_tokens )}""" )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self , seq ):
        """simple docstring"""
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self , token_ids_0: List , token_ids_1: Optional[List] = None , already_has_special_tokens: bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
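# Minimal usage sketch (added; downloads the public checkpoint named above):
#
#     tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#     ids = tok("Summarize this article.").input_ids   # sequence ends with tok.eos_token_id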
| 143 | 1 |
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ):
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with( node_proto , name , new_name ):
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )


def _graph_replace_input_with( graph_proto , name , new_name ):
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )


def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ):
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )


def remove_dup_initializers( onnx_file_path ):
    '''simple docstring'''
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('''unexpected data type: ''' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('''total reduced size: ''' , total_reduced_size / 1024 / 1024 / 1024 , '''GB''' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    new_model_name = '''optimized_''' + model_file_name
    new_model_path = os.path.join(model_file_folder , new_model_name )
    onnx.save(model , new_model_path )
    return new_model_path
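# Usage sketch (added; the model path is a placeholder):
#
#     optimized_path = remove_dup_initializers("./model.onnx")
#     # writes ./optimized_model.onnx with duplicate initializers folded into one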
| 314 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0006 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
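# Usage sketch (added): the defaults mirror the halfcheetah-medium-v2 checkpoint.
#
#     config = TrajectoryTransformerConfig()
#     assert config.n_embd == 128 and config.block_size == 249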
| 314 | 1 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter( logging.LoggerAdapter ):
    """simple docstring"""

    @staticmethod
    def _should_log( main_process_only ):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log( self , level , msg , *args , **kwargs ):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                """You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
        main_process_only = kwargs.pop("""main_process_only""" , True )
        in_order = kwargs.pop("""in_order""" , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()


def get_logger( name: str , log_level: str = None ):
    '''simple docstring'''
    if log_level is None:
        log_level = os.environ.get("""ACCELERATE_LOG_LEVEL""" , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
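# Usage sketch (added; assumes this module lives inside the accelerate package, so the
# distributed state must be initialized before logging):
#
#     PartialState()
#     logger = get_logger(__name__)
#     logger.info("hello from the main process", main_process_only=True)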
| 353 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro( TestCasePlus ):
    def setUp( self ):
        super().setUp()
        data_cached = cached_path(
            """https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=True , )
        self.data_dir = F'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'

    @slow
    @require_torch_gpu
    def test_model_download( self ):
        MarianMTModel.from_pretrained(MARIAN_MODEL )

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script( self ):
        env_vars_to_replace = {
            """$MAX_LEN""": 64,
            """$BS""": 64,
            """$GAS""": 1,
            """$ENRO_DIR""": self.data_dir,
            """facebook/mbart-large-cc25""": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            """--learning_rate=3e-5""": """--learning_rate 3e-4""",
            """--num_train_epochs 6""": """--num_train_epochs 1""",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
        bash_script = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = F'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["""finetune.py"""] + bash_script.split() + args
        with patch.object(sys , """argv""" , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationModule.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            model = main(args )

        # Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics["""val"""][0]
        last_step_stats = metrics["""val"""][-1]
        self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[F'val_avg_{model.val_metric}'] , float )
        self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 )
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_path = [x for x in contents if x.endswith(""".ckpt""" )][0]
        full_path = os.path.join(args.output_dir , ckpt_path )
        ckpt = torch.load(full_path , map_location="""cpu""" )
        expected_key = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["""test"""] ) == 1
class TestDistilMarianNoTeacher( TestCasePlus ):
    @timeout_decorator.timeout(600 )
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script( self ):
        data_dir = F'{self.test_file_dir_str}/test_data/wmt_en_ro'
        env_vars_to_replace = {
            """--fp16_opt_level=O1""": """""",
            """$MAX_LEN""": 128,
            """$BS""": 16,
            """$GAS""": 1,
            """$ENRO_DIR""": data_dir,
            """$m""": """sshleifer/student_marian_en_ro_6_1""",
            """val_check_interval=0.25""": """val_check_interval=1.0""",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
        )
        bash_script = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
        bash_script = bash_script.replace("""--fp16 """ , """ """ )

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("""--fp16""" , """""" )
        epochs = 6
        testargs = (
            ["""distillation.py"""]
            + bash_script.split()
            + [
                F'--output_dir={output_dir}',
                """--gpus=1""",
                """--learning_rate=1e-3""",
                F'--num_train_epochs={epochs}',
                """--warmup_steps=10""",
                """--val_check_interval=1.0""",
                """--do_predict""",
            ]
        )
        with patch.object(sys , """argv""" , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationDistiller.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args )

        # Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics["""val"""][0]
        last_step_stats = metrics["""val"""][-1]
        assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[F'val_avg_{model.val_metric}'] , float )

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_path = [x for x in contents if x.endswith(""".ckpt""" )][0]
        full_path = os.path.join(args.output_dir , ckpt_path )
        ckpt = torch.load(full_path , map_location="""cpu""" )
        expected_key = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["""test"""] ) == 1
| 89 | 0 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components( self ):
        '''simple docstring'''
        return self._get_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_save_load_optional_components( self ):
        '''simple docstring'''
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16( self ):
        '''simple docstring'''
        super().test_save_load_float16(expected_max_diff=1E-1 )

    def test_attention_slicing_forward_pass( self ):
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def test_save_load_local( self ):
        '''simple docstring'''
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class IFPipelineSlowTests( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all( self ):
        '''simple docstring'''
        # if
        pipe_1 = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.float16 )
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.float16 , text_encoder=None , tokenizer=None )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to('cuda' )
        prompt_embeds , negative_prompt_embeds = pipe_1.encode_prompt('anime turtle' , device='cuda' )
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImgaImgPipeline(**pipe_1.components )
        pipe_2 = IFImgaImgSuperResolutionPipeline(**pipe_2.components )
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_imgaimg(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components )
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components )
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_inpainting(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
    def _test_if( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        '''simple docstring'''
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , num_inference_steps=2 , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , generator=generator , num_inference_steps=2 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
        assert_mean_pixel_difference(image , expected_image )

    def _test_if_imgaimg( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        '''simple docstring'''
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , num_inference_steps=2 , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        original_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(torch_device )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , original_image=original_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
        assert_mean_pixel_difference(image , expected_image )

    def _test_if_inpainting( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        '''simple docstring'''
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        mask_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(torch_device )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , mask_image=mask_image , num_inference_steps=2 , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        original_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(torch_device )
        mask_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , mask_image=mask_image , original_image=original_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
        assert_mean_pixel_difference(image , expected_image )


def _start_torch_memory_measurement():
    '''simple docstring'''
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 7 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments( path , n_shave_prefix_segments=1 ):
    """simple docstring"""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )


def renew_resnet_paths( old_list , n_shave_prefix_segments=0 ):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("""in_layers.0""" , """norm1""" )
        new_item = new_item.replace("""in_layers.2""" , """conv1""" )
        new_item = new_item.replace("""out_layers.0""" , """norm2""" )
        new_item = new_item.replace("""out_layers.3""" , """conv2""" )
        new_item = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
        new_item = new_item.replace("""skip_connection""" , """conv_shortcut""" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping


def renew_attention_paths( old_list , n_shave_prefix_segments=0 ):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("""norm.weight""" , """group_norm.weight""" )
        new_item = new_item.replace("""norm.bias""" , """group_norm.bias""" )
        new_item = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
        new_item = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping


def assign_to_checkpoint( paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    """simple docstring"""
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["""num_head_channels"""] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map["""query"""]] = query.reshape(target_shape )
            checkpoint[path_map["""key"""]] = key.reshape(target_shape )
            checkpoint[path_map["""value"""]] = value.reshape(target_shape )

    for path in paths:
        new_path = path["""new"""]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
        new_path = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
        new_path = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["""old"""] , replacement["""new"""] )

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["""old"""]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["""old"""]]
def convert_ldm_checkpoint( checkpoint , config ):
    """simple docstring"""
    new_checkpoint = {}
    new_checkpoint["""time_embedding.linear_1.weight"""] = checkpoint["""time_embed.0.weight"""]
    new_checkpoint["""time_embedding.linear_1.bias"""] = checkpoint["""time_embed.0.bias"""]
    new_checkpoint["""time_embedding.linear_2.weight"""] = checkpoint["""time_embed.2.weight"""]
    new_checkpoint["""time_embedding.linear_2.bias"""] = checkpoint["""time_embed.2.bias"""]
    new_checkpoint["""conv_in.weight"""] = checkpoint["""input_blocks.0.0.weight"""]
    new_checkpoint["""conv_in.bias"""] = checkpoint["""input_blocks.0.0.bias"""]
    new_checkpoint["""conv_norm_out.weight"""] = checkpoint["""out.0.weight"""]
    new_checkpoint["""conv_norm_out.bias"""] = checkpoint["""out.0.bias"""]
    new_checkpoint["""conv_out.weight"""] = checkpoint["""out.2.weight"""]
    new_checkpoint["""conv_out.bias"""] = checkpoint["""out.2.bias"""]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
        for layer_id in range(num_input_blocks )
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
        for layer_id in range(num_middle_blocks )
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
        for layer_id in range(num_output_blocks )
    }

    for i in range(1 , num_input_blocks ):
        block_id = (i - 1) // (config["""num_res_blocks"""] + 1)
        layer_in_block_id = (i - 1) % (config["""num_res_blocks"""] + 1)
        resnets = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
        attentions = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
        if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
            new_checkpoint[f"""down_blocks.{block_id}.downsamplers.0.conv.weight"""] = checkpoint[
                f"""input_blocks.{i}.0.op.weight"""
            ]
            new_checkpoint[f"""down_blocks.{block_id}.downsamplers.0.conv.bias"""] = checkpoint[
                f"""input_blocks.{i}.0.op.bias"""
            ]
            continue
        paths = renew_resnet_paths(resnets )
        meta_path = {"""old""": f"""input_blocks.{i}.0""", """new""": f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
        resnet_op = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
        assign_to_checkpoint(
            paths , new_checkpoint , checkpoint , additional_replacements=[meta_path, resnet_op] , config=config )
        if len(attentions ):
            paths = renew_attention_paths(attentions )
            meta_path = {
                """old""": f"""input_blocks.{i}.1""",
                """new""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
            }
            to_split = {
                f"""input_blocks.{i}.1.qkv.bias""": {
                    """key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                    """query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                    """value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                },
                f"""input_blocks.{i}.1.qkv.weight""": {
                    """key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                    """query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                    """value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                },
            }
            assign_to_checkpoint(
                paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split , config=config , )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0 )
    assign_to_checkpoint(resnet_0_paths , new_checkpoint , checkpoint , config=config )
    resnet_1_paths = renew_resnet_paths(resnet_1 )
    assign_to_checkpoint(resnet_1_paths , new_checkpoint , checkpoint , config=config )
    attentions_paths = renew_attention_paths(attentions )
    to_split = {
        """middle_block.1.qkv.bias""": {
            """key""": """mid_block.attentions.0.key.bias""",
            """query""": """mid_block.attentions.0.query.bias""",
            """value""": """mid_block.attentions.0.value.bias""",
        },
        """middle_block.1.qkv.weight""": {
            """key""": """mid_block.attentions.0.key.weight""",
            """query""": """mid_block.attentions.0.query.weight""",
            """value""": """mid_block.attentions.0.value.weight""",
        },
    }
    assign_to_checkpoint(
        attentions_paths , new_checkpoint , checkpoint , attention_paths_to_split=to_split , config=config )

    for i in range(num_output_blocks ):
        block_id = i // (config["""num_res_blocks"""] + 1)
        layer_in_block_id = i % (config["""num_res_blocks"""] + 1)
        output_block_layers = [shave_segments(name , 2 ) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id , layer_name = layer.split(""".""" )[0], shave_segments(layer , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name )
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list ) > 1:
            resnets = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
            attentions = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
            resnet_0_paths = renew_resnet_paths(resnets )
            paths = renew_resnet_paths(resnets )
            meta_path = {"""old""": f"""output_blocks.{i}.0""", """new""": f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
            assign_to_checkpoint(paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , config=config )
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
                new_checkpoint[f"""up_blocks.{block_id}.upsamplers.0.conv.weight"""] = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.weight"""
                ]
                new_checkpoint[f"""up_blocks.{block_id}.upsamplers.0.conv.bias"""] = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(UpperCamelCase ) == 2:
lowerCAmelCase__ : Tuple = []
if len(UpperCamelCase ):
lowerCAmelCase__ : Dict = renew_attention_paths(UpperCamelCase )
lowerCAmelCase__ : Tuple = {
"""old""": f"""output_blocks.{i}.1""",
"""new""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
lowerCAmelCase__ : Tuple = {
f"""output_blocks.{i}.1.qkv.bias""": {
"""key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"""query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"""value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
"""key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"""query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"""value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
UpperCamelCase , UpperCamelCase , UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=UpperCamelCase , )
else:
lowerCAmelCase__ : int = renew_resnet_paths(UpperCamelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowerCAmelCase__ : Tuple = """.""".join(["""output_blocks""", str(UpperCamelCase ), path["""old"""]] )
lowerCAmelCase__ : List[Any] = """.""".join(["""up_blocks""", str(UpperCamelCase ), """resnets""", str(UpperCamelCase ), path["""new"""]] )
lowerCAmelCase__ : Union[str, Any] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowerCAmelCase = json.loads(f.read())
_lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowerCAmelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
_lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
_lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
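# A hedged usage sketch (file names below are illustrative, not taken from this
# script): the converter above is a CLI tool, so it would typically be run as
#
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted_model
#
# The config JSON must describe the UNet layout (notably "num_res_blocks"), since
# the key remapping above indexes down/up blocks with config["num_res_blocks"] + 1.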
| 37 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__SCREAMING_SNAKE_CASE = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
if rng is None:
A : List[Any] = random.Random()
A : Any = 1
for dim in shape:
total_dims *= dim
A : List[Any] = []
for _ in range(_lowerCamelCase ):
values.append(rng.randint(0 , vocab_size - 1 ) )
A : Any = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase )
return output
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=None ):
A : List[Any] = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase )
# make sure that at least one token is attended to for each batch
A : Optional[Any] = 1
return attn_mask
@require_flax
class lowerCamelCase_ :
'''simple docstring'''
a__ = None
a__ = ()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
A : Any = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
A : Optional[int] = 2
A : Optional[Any] = inputs["""input_ids"""].shape[-1] // 2
A : Union[str, Any] = inputs["""input_ids"""][:max_batch_size, :sequence_length]
A : Any = jnp.ones_like(__lowerCamelCase )
A : Optional[int] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
A : Any = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
A : Optional[int] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A : str = self._get_input_ids_and_config()
A : int = False
A : Dict = max_length
A : Dict = 0
for model_class in self.all_generative_model_classes:
A : str = model_class(__lowerCamelCase )
A : Tuple = model_class.__name__[4:] # Skip the "Flax" at the beginning
A : List[str] = getattr(__lowerCamelCase , __lowerCamelCase )
A : Union[str, Any] = pt_model_class(__lowerCamelCase ).eval()
A : str = load_flax_weights_in_pytorch_model(__lowerCamelCase , flax_model.params )
A : List[str] = flax_model.generate(__lowerCamelCase ).sequences
A : Dict = pt_model.generate(torch.tensor(__lowerCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
A : int = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]:
A : str = self._get_input_ids_and_config()
A : Tuple = False
A : Any = max_length
for model_class in self.all_generative_model_classes:
A : Optional[Any] = model_class(__lowerCamelCase )
A : Any = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
A : List[str] = jit(model.generate )
A : Dict = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
A : List[Any] = self._get_input_ids_and_config()
A : List[str] = True
A : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
A : Optional[int] = model_class(__lowerCamelCase )
A : Tuple = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
A : Optional[Any] = jit(model.generate )
A : Union[str, Any] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
A : List[Any] = self._get_input_ids_and_config()
A : Optional[Any] = False
A : Optional[Any] = max_length
A : Tuple = 2
for model_class in self.all_generative_model_classes:
A : List[Any] = model_class(__lowerCamelCase )
A : List[str] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
A : List[str] = jit(model.generate )
A : Any = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
A : int = self._get_input_ids_and_config()
A : Any = False
A : Tuple = max_length
A : Dict = 2
A : int = 2
for model_class in self.all_generative_model_classes:
A : List[str] = model_class(__lowerCamelCase )
A : Union[str, Any] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
A : Any = self._get_input_ids_and_config()
A : Optional[Any] = True
A : Any = max_length
A : int = 0.8
A : Union[str, Any] = 10
A : List[str] = 0.3
A : List[str] = 1
A : Union[str, Any] = 8
A : Any = 9
for model_class in self.all_generative_model_classes:
A : int = model_class(__lowerCamelCase )
A : str = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
A : Tuple = jit(model.generate )
A : Dict = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
A : str = self._get_input_ids_and_config()
A : Tuple = max_length
A : Dict = 1
A : str = 8
A : List[str] = 9
for model_class in self.all_generative_model_classes:
A : int = model_class(__lowerCamelCase )
A : Union[str, Any] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
A : Optional[int] = jit(model.generate )
A : Union[str, Any] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
A : Optional[Any] = self._get_input_ids_and_config()
A : Any = max_length
A : List[Any] = 2
A : Union[str, Any] = 1
A : Any = 8
A : Union[str, Any] = 9
for model_class in self.all_generative_model_classes:
A : Optional[int] = model_class(__lowerCamelCase )
A : List[str] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
A : List[Any] = jit(model.generate )
A : str = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
A : List[str] = self._get_input_ids_and_config()
# pad attention mask on the left
A : Optional[int] = attention_mask.at[(0, 0)].set(0 )
A : Union[str, Any] = False
A : Dict = max_length
for model_class in self.all_generative_model_classes:
A : str = model_class(__lowerCamelCase )
A : List[str] = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
A : Dict = jit(model.generate )
A : Tuple = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]:
A : Any = self._get_input_ids_and_config()
# pad attention mask on the left
A : Dict = attention_mask.at[(0, 0)].set(0 )
A : Dict = True
A : Tuple = max_length
for model_class in self.all_generative_model_classes:
A : List[Any] = model_class(__lowerCamelCase )
A : List[str] = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
A : Optional[Any] = jit(model.generate )
A : Optional[int] = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str:
A : Optional[int] = self._get_input_ids_and_config()
# pad attention mask on the left
A : List[Any] = attention_mask.at[(0, 0)].set(0 )
A : int = 2
A : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
A : Tuple = model_class(__lowerCamelCase )
A : List[Any] = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
A : Dict = jit(model.generate )
A : Tuple = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
A : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
A : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
A : Dict = """Hello world"""
A : Optional[int] = tokenizer(__lowerCamelCase , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__lowerCamelCase , "do_samples" ):
model.generate(__lowerCamelCase , do_samples=__lowerCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__lowerCamelCase , "foo" ):
A : str = {"""foo""": """bar"""}
            model.generate(__lowerCamelCase , **__lowerCamelCase )
| 368 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE = {"""facebook/bart-base""": BartForConditionalGeneration}
__SCREAMING_SNAKE_CASE = {"""facebook/bart-base""": BartTokenizer}
def UpperCAmelCase ( ):
A : List[Any] = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" , type=_lowerCamelCase , default=_lowerCamelCase , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=_lowerCamelCase , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=_lowerCamelCase , default=_lowerCamelCase , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=_lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=_lowerCamelCase , )
parser.add_argument(
"--config_name" , type=_lowerCamelCase , default=_lowerCamelCase , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=_lowerCamelCase , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=_lowerCamelCase , default=_lowerCamelCase , help="Where to store the final ONNX file." )
A : Any = parser.parse_args()
return args
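# A hedged invocation sketch (the script file name is an assumption; the flags
# mirror the parser defined above, and "facebook/bart-base" is the only checkpoint
# registered in the model/tokenizer dicts):
#
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --device cpu --output_file_path BART.onnx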
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase="cpu" ):
A : int = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
A : List[Any] = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
A : Optional[int] = 0
A : Union[str, Any] = None
A : Optional[Any] = 0
return huggingface_model, tokenizer
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
model.eval()
A : Optional[Any] = None
A : List[Any] = torch.jit.script(BARTBeamSearchGenerator(_lowerCamelCase ) )
with torch.no_grad():
A : int = "My friends are cool but they eat too many carbs."
A : List[Any] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device )
A : int = model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=_lowerCamelCase , max_length=_lowerCamelCase , early_stopping=_lowerCamelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_lowerCamelCase , (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _lowerCamelCase , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} , example_outputs=_lowerCamelCase , )
logger.info("Model exported to {}".format(_lowerCamelCase ) )
A : Optional[Any] = remove_dup_initializers(os.path.abspath(_lowerCamelCase ) )
logger.info("Deduplicated and optimized model written to {}".format(_lowerCamelCase ) )
A : List[Any] = onnxruntime.InferenceSession(_lowerCamelCase )
A : Dict = ort_sess.run(
_lowerCamelCase , {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(_lowerCamelCase ),
"max_length": np.array(_lowerCamelCase ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def UpperCAmelCase ( ):
A : Union[str, Any] = parse_args()
A : List[Any] = 5
A : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
A : Union[str, Any] = torch.device(args.device )
A , A : Optional[int] = load_model_tokenizer(args.model_name_or_path , _lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(_lowerCamelCase )
if args.max_length:
A : Optional[int] = args.max_length
if args.num_beams:
A : List[Any] = args.num_beams
if args.output_file_path:
A : int = args.output_file_path
else:
A : int = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
    main()
| 256 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(__A )
class snake_case_ ( __A ):
def __init__( self : str , *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> Dict:
super().__init__(*lowercase_ , **lowercase_ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def __UpperCamelCase ( self : int , lowercase_ : List[Any]=None ) -> List[Any]:
lowercase__ : Optional[int] = {}
if top_k is not None:
lowercase__ : Tuple = top_k
return {}, {}, postprocess_params
def __call__( self : Optional[Any] , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Optional[int] ) -> Optional[int]:
return super().__call__(lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Any , lowercase_ : Union[str, Any] ) -> Any:
lowercase__ : str = load_image(lowercase_ )
lowercase__ : str = self.image_processor(images=lowercase_ , return_tensors=self.framework )
return model_inputs
def __UpperCamelCase ( self : int , lowercase_ : Any ) -> List[str]:
lowercase__ : Optional[Any] = self.model(**lowercase_ )
return model_outputs
def __UpperCamelCase ( self : List[Any] , lowercase_ : List[Any] , lowercase_ : List[Any]=5 ) -> Optional[Any]:
if top_k > self.model.config.num_labels:
lowercase__ : Optional[Any] = self.model.config.num_labels
if self.framework == "pt":
lowercase__ : int = model_outputs.logits.softmax(-1 )[0]
lowercase__ , lowercase__ : Optional[Any] = probs.topk(lowercase_ )
elif self.framework == "tf":
lowercase__ : Optional[int] = stable_softmax(model_outputs.logits , axis=-1 )[0]
lowercase__ : int = tf.math.top_k(lowercase_ , k=lowercase_ )
lowercase__ , lowercase__ : Any = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
lowercase__ : Any = scores.tolist()
lowercase__ : List[Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
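# A minimal usage sketch of the pipeline defined above, meant to be run as a
# separate script; the model name and image path are illustrative assumptions:
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
    # Returns a list of {"score": float, "label": str} dicts, as built in postprocess above.
    print(classifier("cat.jpg", top_k=3))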
| 87 |
from PIL import Image
def lowercase_ ( _lowerCamelCase : Image , _lowerCamelCase : int):
lowercase__ : List[str] = (259 * (level + 255)) / (255 * (259 - level))
def contrast(_lowerCamelCase : int) -> int:
return int(128 + factor * (c - 128))
return img.point(_lowerCamelCase)
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
UpperCamelCase = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
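        # Worked check of the factor formula above (plain arithmetic, added for
        # illustration): level = 170 gives factor = (259 * 425) / (255 * 89) ~ 4.85,
        # so mid-grey (128) is a fixed point while values away from it are pushed outward.
        print((259 * (170 + 255)) / (255 * (259 - 170)))  # ~4.85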
| 87 | 1 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a ( snake_case__, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = BloomTokenizerFast
SCREAMING_SNAKE_CASE_ = BloomTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = 'tokenizer_file'
SCREAMING_SNAKE_CASE_ = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def _lowerCAmelCase ( self : Optional[int] ):
super().setUp()
UpperCamelCase__ : Optional[Any] =BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self : Optional[int] , **lowercase_ : List[Any] ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def _lowerCAmelCase ( self : int ):
UpperCamelCase__ : List[Any] =self.get_rust_tokenizer()
UpperCamelCase__ : int =['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
UpperCamelCase__ : Union[str, Any] =[[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
UpperCamelCase__ : List[Any] =tokenizer.batch_encode_plus(lowercase_ )['''input_ids''']
self.assertListEqual(lowercase_ , lowercase_ )
UpperCamelCase__ : List[str] =tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def _lowerCAmelCase ( self : List[Any] , lowercase_ : Optional[Any]=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ : Dict =self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
UpperCamelCase__ : Optional[int] ='''This is a simple input'''
UpperCamelCase__ : Optional[Any] =['''This is a simple input 1''', '''This is a simple input 2''']
UpperCamelCase__ : int =('''This is a simple input''', '''This is a pair''')
UpperCamelCase__ : int =[
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(lowercase_ , max_length=lowercase_ )
tokenizer_r.encode_plus(lowercase_ , max_length=lowercase_ )
tokenizer_r.batch_encode_plus(lowercase_ , max_length=lowercase_ )
tokenizer_r.encode(lowercase_ , max_length=lowercase_ )
tokenizer_r.batch_encode_plus(lowercase_ , max_length=lowercase_ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
UpperCamelCase__ : Union[str, Any] =None # Hotfixing padding = None
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding='''max_length''' )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding='''max_length''' )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding='''max_length''' , )
def _lowerCAmelCase ( self : Dict ):
UpperCamelCase__ : str =self.get_rust_tokenizer()
UpperCamelCase__ : List[str] =load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=lowercase_ )
        UpperCamelCase__ : str =next(iter(lowercase_ ) )['''premise'''] # pick one sample from the streamed dataset
UpperCamelCase__ : Tuple =list(sample_data.values() )
UpperCamelCase__ : int =list(map(tokenizer.encode , lowercase_ ) )
UpperCamelCase__ : int =[tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_ ) for x in output_tokens]
self.assertListEqual(lowercase_ , lowercase_ )
def _lowerCAmelCase ( self : str ):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
        # impose any sequence length constraint. The parent class's test would fail since it relies on
        # the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 157 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
class __a :
"""simple docstring"""
def __init__( self : Optional[Any] , lowercase_ : Tuple=None , **lowercase_ : int ):
logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' )
UpperCamelCase__ : Optional[Any] =model
UpperCamelCase__ : str =kwargs.get('''model_save_dir''' , lowercase_ )
UpperCamelCase__ : int =kwargs.get('''latest_model_name''' , lowercase_ )
def __call__( self : Any , **lowercase_ : Any ):
UpperCamelCase__ : str ={k: np.array(lowercase_ ) for k, v in kwargs.items()}
return self.model.run(lowercase_ , lowercase_ )
@staticmethod
def _lowerCAmelCase ( lowercase_ : Union[str, Path] , lowercase_ : Dict=None , lowercase_ : Optional[Any]=None ):
if provider is None:
logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' )
UpperCamelCase__ : List[str] ='''CPUExecutionProvider'''
return ort.InferenceSession(lowercase_ , providers=[provider] , sess_options=lowercase_ )
def _lowerCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Path] , lowercase_ : Optional[str] = None , **lowercase_ : Union[str, Any] ):
UpperCamelCase__ : Union[str, Any] =file_name if file_name is not None else ONNX_WEIGHTS_NAME
UpperCamelCase__ : Tuple =self.model_save_dir.joinpath(self.latest_model_name )
UpperCamelCase__ : str =Path(lowercase_ ).joinpath(lowercase_ )
try:
shutil.copyfile(lowercase_ , lowercase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
UpperCamelCase__ : List[str] =self.model_save_dir.joinpath(lowercase_ )
if src_path.exists():
UpperCamelCase__ : List[str] =Path(lowercase_ ).joinpath(lowercase_ )
try:
shutil.copyfile(lowercase_ , lowercase_ )
except shutil.SameFileError:
pass
def _lowerCAmelCase ( self : Tuple , lowercase_ : Union[str, os.PathLike] , **lowercase_ : int , ):
if os.path.isfile(lowercase_ ):
logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(lowercase_ , exist_ok=lowercase_ )
# saving model weights/files
self._save_pretrained(lowercase_ , **lowercase_ )
@classmethod
def _lowerCAmelCase ( cls : List[str] , lowercase_ : Union[str, Path] , lowercase_ : Optional[Union[bool, str, None]] = None , lowercase_ : Optional[Union[str, None]] = None , lowercase_ : bool = False , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional["ort.SessionOptions"] = None , **lowercase_ : List[Any] , ):
UpperCamelCase__ : Union[str, Any] =file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowercase_ ):
UpperCamelCase__ : Any =OnnxRuntimeModel.load_model(
os.path.join(lowercase_ , lowercase_ ) , provider=lowercase_ , sess_options=lowercase_ )
UpperCamelCase__ : List[str] =Path(lowercase_ )
# load model from hub
else:
# download model
UpperCamelCase__ : Tuple =hf_hub_download(
repo_id=lowercase_ , filename=lowercase_ , use_auth_token=lowercase_ , revision=lowercase_ , cache_dir=lowercase_ , force_download=lowercase_ , )
UpperCamelCase__ : Any =Path(lowercase_ ).parent
UpperCamelCase__ : List[Any] =Path(lowercase_ ).name
UpperCamelCase__ : Optional[int] =OnnxRuntimeModel.load_model(lowercase_ , provider=lowercase_ , sess_options=lowercase_ )
return cls(model=lowercase_ , **lowercase_ )
@classmethod
def _lowerCAmelCase ( cls : Dict , lowercase_ : Union[str, Path] , lowercase_ : bool = True , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , **lowercase_ : List[Any] , ):
UpperCamelCase__ : Dict =None
if len(str(lowercase_ ).split('''@''' ) ) == 2:
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] =model_id.split('''@''' )
return cls._from_pretrained(
model_id=lowercase_ , revision=lowercase_ , cache_dir=lowercase_ , force_download=lowercase_ , use_auth_token=lowercase_ , **lowercase_ , )
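# A hedged usage sketch (the directory layout and argument values are assumptions;
# ONNX pipelines usually store one model.onnx per submodel folder):
#
#   unet = OnnxRuntimeModel.from_pretrained("./sd-onnx/unet", provider="CPUExecutionProvider")
#   noise_pred = unet(sample=latents, timestep=t, encoder_hidden_states=text_embeddings)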
| 157 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :int = XLNetTokenizer
UpperCAmelCase_ :Optional[int] = XLNetTokenizerFast
UpperCAmelCase_ :Optional[int] = True
UpperCAmelCase_ :int = True
def __lowerCAmelCase ( self ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ :Optional[int] = XLNetTokenizer(__A , keep_accents=__A )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[int] = """<s>"""
lowerCAmelCase_ :int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(__A ) , 1006 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Any = XLNetTokenizer(__A , keep_accents=__A )
lowerCAmelCase_ :Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [285, 46, 10, 170, 382] )
lowerCAmelCase_ :Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase_ :int = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
lowerCAmelCase_ :List[str] = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Tuple = XLNetTokenizer(__A , do_lower_case=__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Optional[Any] = XLNetTokenizer(__A , do_lower_case=__A )
lowerCAmelCase_ :List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Optional[Any] = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
lowerCAmelCase_ :Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=__A )
lowerCAmelCase_ :int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A )
lowerCAmelCase_ :int = tokenizer.build_inputs_with_special_tokens(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def __lowerCAmelCase ( self ) -> Dict:
# fmt: off
lowerCAmelCase_ :Union[str, Any] = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 84 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=__A , )
assert hasattr(self , """env""" )
def __lowerCAmelCase ( self , __A ) -> Any:
# configuration for running training on smdistributed Model Parallel
lowerCAmelCase_ :Union[str, Any] = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCAmelCase_ :Tuple = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCAmelCase_ :Any = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCAmelCase_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__A , instance_type=self.instance_type , debugger_hook_config=__A , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=__A , py_version="""py36""" , )
def __lowerCAmelCase ( self , __A ) -> List[Any]:
TrainingJobAnalytics(__A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __lowerCAmelCase ( self , __A ) -> List[str]:
# create estimator
lowerCAmelCase_ :Any = self.create_estimator(__A )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase_ :Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase_ :List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase_ :Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from the SageMaker job; this includes starting, preprocessing, and stopping
lowerCAmelCase_ :Optional[int] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __A )
| 84 | 1 |
from math import ceil
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = list(range(0 , _lowercase ) )
UpperCAmelCase_ : Optional[int] = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
UpperCAmelCase_ : List[Any] = []
for i in device_map_blocks:
if device_map_blocks.count(_lowercase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_lowercase )
    # Missing and extra blocks
UpperCAmelCase_ : Any = [i for i in blocks if i not in device_map_blocks]
UpperCAmelCase_ : Union[str, Any] = [i for i in device_map_blocks if i not in blocks]
if len(_lowercase ) != 0:
raise ValueError(
'''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
''' These attention blocks were specified more than once: ''' + str(_lowercase ) )
if len(_lowercase ) != 0:
raise ValueError(
'''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
'''blocks to a device on the device_map: ''' + str(_lowercase ) )
if len(_lowercase ) != 0:
raise ValueError(
'''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
+ str(_lowercase ) )
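# For intuition, the splitting function below ceil-divides n_layers across the given
# devices. A self-contained replica (the helper name is hypothetical; the logic
# mirrors the obfuscated definition that follows, reusing the module's `ceil` import):
def _demo_device_map(n_layers, devices):
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    return dict(zip(devices, [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]))


# _demo_device_map(12, [0, 1, 2]) -> {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}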
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = list(range(_lowercase ) )
UpperCAmelCase_ : Optional[Any] = int(ceil(n_layers / len(_lowercase ) ) )
UpperCAmelCase_ : str = [layers[i : i + n_blocks] for i in range(0 , _lowercase , _lowercase )]
    return dict(zip(_lowercase , _lowercase ) )
| 235 |
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def lowerCamelCase__ ( _lowercase , _lowercase=0 ):
'''simple docstring'''
return sorted(_lowercase , key=lambda _lowercase : x[column] )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=float('''inf''' ) ):
'''simple docstring'''
for i in range(points_counts - 1 ):
for j in range(i + 1 , _lowercase ):
UpperCAmelCase_ : Optional[int] = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase_ : Optional[Any] = current_dis
return min_dis
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=float('''inf''' ) ):
'''simple docstring'''
for i in range(min(6 , points_counts - 1 ) , _lowercase ):
for j in range(max(0 , i - 6 ) , _lowercase ):
UpperCAmelCase_ : List[str] = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase_ : Optional[int] = current_dis
return min_dis
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if points_counts <= 3:
return dis_between_closest_pair(_lowercase , _lowercase )
# recursion
UpperCAmelCase_ : Optional[int] = points_counts // 2
UpperCAmelCase_ : List[Any] = closest_pair_of_points_sqr(
_lowercase , points_sorted_on_y[:mid] , _lowercase )
UpperCAmelCase_ : Dict = closest_pair_of_points_sqr(
_lowercase , points_sorted_on_y[mid:] , points_counts - mid )
UpperCAmelCase_ : Union[str, Any] = min(_lowercase , _lowercase )
UpperCAmelCase_ : str = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(_lowercase )
UpperCAmelCase_ : Optional[Any] = dis_between_closest_in_strip(
_lowercase , len(_lowercase ) , _lowercase )
return min(_lowercase , _lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = column_based_sort(_lowercase , column=0 )
UpperCAmelCase_ : List[Any] = column_based_sort(_lowercase , column=1 )
return (
closest_pair_of_points_sqr(
_lowercase , _lowercase , _lowercase )
) ** 0.5
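# A brute-force O(n^2) cross-check, added for illustration; the helper name is
# hypothetical and it inlines the squared Euclidean distance so it does not depend
# on the obfuscated definitions above:
def brute_force_closest_pair(points):
    best = float("inf")
    for i in range(len(points) - 1):
        for j in range(i + 1, len(points)):
            best = min(best, (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2)
    return best ** 0.5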
if __name__ == "__main__":
__a = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print('Distance:', closest_pair_of_points(points, len(points)))
| 235 | 1 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase__( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
@register_to_config
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False , ) -> Optional[int]:
super().__init__()
lowercase_ = nn.Embedding(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = nn.Embedding(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = False
lowercase_ = nn.Dropout(p=SCREAMING_SNAKE_CASE_ )
lowercase_ = TaConfig(
vocab_size=SCREAMING_SNAKE_CASE_ , d_model=SCREAMING_SNAKE_CASE_ , num_heads=SCREAMING_SNAKE_CASE_ , d_kv=SCREAMING_SNAKE_CASE_ , d_ff=SCREAMING_SNAKE_CASE_ , dropout_rate=SCREAMING_SNAKE_CASE_ , feed_forward_proj=SCREAMING_SNAKE_CASE_ , is_decoder=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , )
lowercase_ = nn.ModuleList()
for lyr_num in range(SCREAMING_SNAKE_CASE_ ):
lowercase_ = TaBlock(SCREAMING_SNAKE_CASE_ )
self.encoders.append(SCREAMING_SNAKE_CASE_ )
lowercase_ = TaLayerNorm(SCREAMING_SNAKE_CASE_ )
lowercase_ = nn.Dropout(p=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[Any]:
lowercase_ = self.token_embedder(SCREAMING_SNAKE_CASE_ )
lowercase_ = encoder_input_tokens.shape[1]
lowercase_ = torch.arange(SCREAMING_SNAKE_CASE_ , device=encoder_input_tokens.device )
x += self.position_encoding(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.dropout_pre(SCREAMING_SNAKE_CASE_ )
        # invert the attention mask
lowercase_ = encoder_input_tokens.size()
lowercase_ = self.get_extended_attention_mask(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for lyr in self.encoders:
lowercase_ = lyr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = self.layer_norm(SCREAMING_SNAKE_CASE_ )
return self.dropout_post(SCREAMING_SNAKE_CASE_ ), encoder_inputs_mask
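# A hedged shape sketch: with de-obfuscated names this encoder's constructor takes
# (max_length, vocab_size, d_model, dropout_rate, num_layers, num_heads, d_kv, d_ff,
# feed_forward_proj, is_decoder). The class name and concrete values below are
# illustrative assumptions, not taken from any released config:
#
#   encoder = NotesEncoder(2048, 1536, 768, 0.1, 12, 12, 64, 2048, "gated-gelu")
#   tokens = torch.randint(0, 1536, (1, 2048))
#   hidden_states, mask = encoder(tokens, tokens > 0)  # hidden_states: (1, 2048, 768)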
| 30 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : List[str] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ : Dict = GenerationConfig.from_model_config(A )
lowerCamelCase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = GenerationConfig()
lowerCamelCase_ : Dict = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
lowerCamelCase_ : int = copy.deepcopy(A )
lowerCamelCase_ : str = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {'''foo''': '''bar'''} )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = GenerationConfig()
lowerCamelCase_ : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(A )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained(A )
        # the ad-hoc attribute set before saving survives the save/load round-trip
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ : Tuple = GenerationConfig.from_model_config(A )
assert not hasattr(A , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ : Tuple = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
lowerCamelCase_ : List[str] = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
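# A minimal round-trip sketch of what the tests above exercise (the directory name
# is illustrative):
#
#   cfg = GenerationConfig(do_sample=True, temperature=0.7)
#   cfg.save_pretrained("./gen-config")
#   assert GenerationConfig.from_pretrained("./gen-config").temperature == 0.7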
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            # version metadata can differ between pushes, so skip it
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
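# The same push/pull cycle works outside the test harness. A minimal sketch,
# assuming you are already authenticated (e.g. via `huggingface-cli login`)
# and own the target repo; "my-user" below is a hypothetical account name,
# not a real repo.
from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7)
config.push_to_hub("test-generation-config")  # creates or updates the repo

loaded = GenerationConfig.from_pretrained("my-user/test-generation-config")
assert loaded.temperature == 0.7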
| 318 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
if p < 2:
raise ValueError('p should not be less than 2!' )
elif p == 2:
return True
lowerCamelCase__ : str = 4
lowerCamelCase__ : List[str] = (1 << p) - 1
for _ in range(p - 2 ):
lowerCamelCase__ : Any = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
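# Sanity check of the recurrence above, worked by hand for p = 5: starting
# from s_0 = 4, iterate s_k = (s_{k-1}**2 - 2) mod M_p exactly p - 2 times;
# M_p is prime iff the final value is 0.
m = (1 << 5) - 1  # M_5 = 31
s = 4
s = (s * s - 2) % m  # (16 - 2) % 31 = 14
s = (s * s - 2) % m  # (196 - 2) % 31 = 8
s = (s * s - 2) % m  # (64 - 2) % 31 = 0
assert s == 0  # so M_5 = 31 is prime, matching lucas_lehmer_test(5)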
| 370 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
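# For context, a rough standalone imitation of the lazy-import pattern used
# above (not the real transformers._LazyModule): submodules are imported only
# when one of their attributes is first accessed, so importing the package
# stays cheap even when torch/tf/flax are installed.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public name to the submodule that defines it
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value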
| 316 | 0 |