code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
lowerCamelCase_ : Optional[int] = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages | 81 | import math
def lowerCAmelCase_ ( __A ) -> bool:
'''simple docstring'''
return math.sqrt(__A ) * math.sqrt(__A ) == num
def lowerCAmelCase_ ( __A ) -> bool:
'''simple docstring'''
UpperCAmelCase__ = 0
UpperCAmelCase__ = n
while left <= right:
UpperCAmelCase__ = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
UpperCAmelCase__ = mid - 1
else:
UpperCAmelCase__ = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 0 |
def _UpperCAmelCase ( snake_case , snake_case ):
"""simple docstring"""
return x if y == 0 else greatest_common_divisor(snake_case , x % y )
def _UpperCAmelCase ( snake_case , snake_case ):
"""simple docstring"""
return (x * y) // greatest_common_divisor(snake_case , snake_case )
def _UpperCAmelCase ( snake_case = 20 ):
"""simple docstring"""
_lowerCAmelCase = 1
for i in range(1 , n + 1 ):
_lowerCAmelCase = lcm(snake_case , snake_case )
return g
if __name__ == "__main__":
print(f"{solution() = }")
| 82 | import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : torch.FloatTensor
__UpperCAmelCase : Optional[torch.FloatTensor] = None
def lowerCAmelCase_ ( __A, __A=0.999, __A="cosine", ) -> Tuple:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(__A ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__A ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCAmelCase__ = []
for i in range(__A ):
UpperCAmelCase__ = i / num_diffusion_timesteps
UpperCAmelCase__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__A ) / alpha_bar_fn(__A ), __A ) )
return torch.tensor(__A, dtype=torch.floataa )
class A ( UpperCAmelCase_ , UpperCAmelCase_ ):
@register_to_config
def __init__(self : List[str] , __UpperCAmelCase : int = 1_0_0_0 , __UpperCAmelCase : str = "fixed_small_log" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[float] = 1.0 , __UpperCAmelCase : str = "epsilon" , __UpperCAmelCase : str = "squaredcos_cap_v2" , ) -> Optional[int]:
"""simple docstring"""
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
UpperCAmelCase__ = betas_for_alpha_bar(__UpperCAmelCase )
UpperCAmelCase__ = 1.0 - self.betas
UpperCAmelCase__ = torch.cumprod(self.alphas , dim=0 )
UpperCAmelCase__ = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
UpperCAmelCase__ = 1.0
# setable values
UpperCAmelCase__ = None
UpperCAmelCase__ = torch.from_numpy(np.arange(0 , __UpperCAmelCase )[::-1].copy() )
UpperCAmelCase__ = variance_type
def lowercase_ (self : List[str] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Optional[int] = None ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def lowercase_ (self : int , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, torch.device] = None ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = num_inference_steps
UpperCAmelCase__ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
UpperCAmelCase__ = (np.arange(0 , __UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
UpperCAmelCase__ = torch.from_numpy(__UpperCAmelCase ).to(__UpperCAmelCase )
def lowercase_ (self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[str]=None ) -> Tuple:
"""simple docstring"""
if prev_timestep is None:
UpperCAmelCase__ = t - 1
UpperCAmelCase__ = self.alphas_cumprod[t]
UpperCAmelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCAmelCase__ = 1 - alpha_prod_t
UpperCAmelCase__ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCAmelCase__ = self.betas[t]
else:
UpperCAmelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
UpperCAmelCase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
UpperCAmelCase__ = torch.log(torch.clamp(__UpperCAmelCase , min=1E-20 ) )
UpperCAmelCase__ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
UpperCAmelCase__ = variance.log()
UpperCAmelCase__ = beta.log()
UpperCAmelCase__ = (predicted_variance + 1) / 2
UpperCAmelCase__ = frac * max_log + (1 - frac) * min_log
return variance
def lowercase_ (self : Optional[int] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : int , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
"""simple docstring"""
UpperCAmelCase__ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
UpperCAmelCase__ , UpperCAmelCase__ = torch.split(__UpperCAmelCase , sample.shape[1] , dim=1 )
else:
UpperCAmelCase__ = None
# 1. compute alphas, betas
if prev_timestep is None:
UpperCAmelCase__ = t - 1
UpperCAmelCase__ = self.alphas_cumprod[t]
UpperCAmelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCAmelCase__ = 1 - alpha_prod_t
UpperCAmelCase__ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCAmelCase__ = self.betas[t]
UpperCAmelCase__ = self.alphas[t]
else:
UpperCAmelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev
UpperCAmelCase__ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ = torch.clamp(
__UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
UpperCAmelCase__ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCAmelCase__ = 0
if t > 0:
UpperCAmelCase__ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=__UpperCAmelCase , device=model_output.device )
UpperCAmelCase__ = self._get_variance(
__UpperCAmelCase , predicted_variance=__UpperCAmelCase , prev_timestep=__UpperCAmelCase , )
if self.variance_type == "fixed_small_log":
UpperCAmelCase__ = variance
elif self.variance_type == "learned_range":
UpperCAmelCase__ = (0.5 * variance).exp()
else:
raise ValueError(
f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
" for the UnCLIPScheduler." )
UpperCAmelCase__ = variance * variance_noise
UpperCAmelCase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__UpperCAmelCase , pred_original_sample=__UpperCAmelCase )
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.IntTensor , ) -> torch.FloatTensor:
"""simple docstring"""
UpperCAmelCase__ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
UpperCAmelCase__ = timesteps.to(original_samples.device )
UpperCAmelCase__ = alphas_cumprod[timesteps] ** 0.5
UpperCAmelCase__ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
UpperCAmelCase__ = sqrt_alpha_prod.unsqueeze(-1 )
UpperCAmelCase__ = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCAmelCase__ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
UpperCAmelCase__ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
UpperCAmelCase__ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 65 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase__ ( lowercase ):
lowercase__ = (DEISMultistepScheduler,)
lowercase__ = (("""num_inference_steps""", 25),)
def UpperCamelCase_ ( self : Dict ,**lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
}
config.update(**lowerCamelCase__ )
return config
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : Tuple=0 ,**lowerCamelCase__ : List[str] ):
'''simple docstring'''
_UpperCamelCase : Dict = dict(self.forward_default_kwargs )
_UpperCamelCase : int = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
_UpperCamelCase : int = self.dummy_sample
_UpperCamelCase : Union[str, Any] = 0.1 * sample
_UpperCamelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_UpperCamelCase : Optional[Any] = self.get_scheduler_config(**lowerCamelCase__ )
_UpperCamelCase : Optional[int] = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
_UpperCamelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
_UpperCamelCase : Tuple = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
_UpperCamelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCamelCase , _UpperCamelCase : Any = sample, sample
for t in range(lowerCamelCase__ ,time_step + scheduler.config.solver_order + 1 ):
_UpperCamelCase : int = scheduler.step(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
_UpperCamelCase : Optional[Any] = new_scheduler.step(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Dict ):
'''simple docstring'''
_UpperCamelCase : List[str] = dict(self.forward_default_kwargs )
_UpperCamelCase : List[Any] = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
_UpperCamelCase : int = self.dummy_sample
_UpperCamelCase : str = 0.1 * sample
_UpperCamelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_UpperCamelCase : Optional[int] = self.get_scheduler_config()
_UpperCamelCase : Tuple = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCamelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
_UpperCamelCase : Optional[int] = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
_UpperCamelCase : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCamelCase : Optional[int] = scheduler.step(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
_UpperCamelCase : List[Any] = new_scheduler.step(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : int=None ,**lowerCamelCase__ : Any ):
'''simple docstring'''
if scheduler is None:
_UpperCamelCase : Any = self.scheduler_classes[0]
_UpperCamelCase : Optional[Any] = self.get_scheduler_config(**lowerCamelCase__ )
_UpperCamelCase : str = scheduler_class(**lowerCamelCase__ )
_UpperCamelCase : List[Any] = self.scheduler_classes[0]
_UpperCamelCase : str = self.get_scheduler_config(**lowerCamelCase__ )
_UpperCamelCase : int = scheduler_class(**lowerCamelCase__ )
_UpperCamelCase : Dict = 10
_UpperCamelCase : Optional[int] = self.dummy_model()
_UpperCamelCase : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase : Dict = model(lowerCamelCase__ ,lowerCamelCase__ )
_UpperCamelCase : List[str] = scheduler.step(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
return sample
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : Any = dict(self.forward_default_kwargs )
_UpperCamelCase : Dict = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
_UpperCamelCase : Union[str, Any] = self.get_scheduler_config()
_UpperCamelCase : Optional[int] = scheduler_class(**lowerCamelCase__ )
_UpperCamelCase : str = self.dummy_sample
_UpperCamelCase : Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ):
_UpperCamelCase : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCamelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_UpperCamelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCamelCase : Union[str, Any] = scheduler.timesteps[5]
_UpperCamelCase : int = scheduler.timesteps[6]
_UpperCamelCase : Tuple = scheduler.step(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
_UpperCamelCase : str = scheduler.step(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCamelCase : List[str] = DEISMultistepScheduler(**self.get_scheduler_config() )
_UpperCamelCase : Any = self.full_loop(scheduler=lowerCamelCase__ )
_UpperCamelCase : List[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1E-3
_UpperCamelCase : str = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCamelCase : List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCamelCase : Union[str, Any] = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCamelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCamelCase : Dict = self.full_loop(scheduler=lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1E-3
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
self.check_over_configs(thresholding=lowerCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase__ ,prediction_type=lowerCamelCase__ ,sample_max_value=lowerCamelCase__ ,algorithm_type='deis' ,solver_order=lowerCamelCase__ ,solver_type=lowerCamelCase__ ,)
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase__ ,solver_type=lowerCamelCase__ ,prediction_type=lowerCamelCase__ ,algorithm_type=lowerCamelCase__ ,)
_UpperCamelCase : int = self.full_loop(
solver_order=lowerCamelCase__ ,solver_type=lowerCamelCase__ ,prediction_type=lowerCamelCase__ ,algorithm_type=lowerCamelCase__ ,)
assert not torch.isnan(lowerCamelCase__ ).any(), "Samples have nan numbers"
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
self.check_over_configs(lower_order_final=lowerCamelCase__ )
self.check_over_configs(lower_order_final=lowerCamelCase__ )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=lowerCamelCase__ ,time_step=0 )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : str = self.full_loop()
_UpperCamelCase : Tuple = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1E-3
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.full_loop(prediction_type='v_prediction' )
_UpperCamelCase : Tuple = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1E-3
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = self.scheduler_classes[0]
_UpperCamelCase : List[Any] = self.get_scheduler_config(thresholding=lowerCamelCase__ ,dynamic_thresholding_ratio=0 )
_UpperCamelCase : Any = scheduler_class(**lowerCamelCase__ )
_UpperCamelCase : Dict = 10
_UpperCamelCase : List[Any] = self.dummy_model()
_UpperCamelCase : Any = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase : Optional[Any] = model(lowerCamelCase__ ,lowerCamelCase__ )
_UpperCamelCase : List[Any] = scheduler.step(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
assert sample.dtype == torch.floataa
| 83 | import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class A ( unittest.TestCase ):
def lowercase_ (self : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = inspect.getfile(accelerate.test_utils )
UpperCAmelCase__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase__ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
UpperCAmelCase__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def lowercase_ (self : List[str] ) -> Any:
"""simple docstring"""
print(f"""Found {torch.cuda.device_count()} devices.""" )
UpperCAmelCase__ = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def lowercase_ (self : str ) -> str:
"""simple docstring"""
print(f"""Found {torch.cuda.device_count()} devices.""" )
UpperCAmelCase__ = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(f"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def lowercase_ (self : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase__ = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def lowercase_ (self : Dict ) -> str:
"""simple docstring"""
print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
UpperCAmelCase__ = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
UpperCamelCase__ = Accelerator()
UpperCamelCase__ = (accelerator.state.process_index + 2, 1_0)
UpperCamelCase__ = torch.randint(0, 1_0, shape).to(accelerator.device)
UpperCamelCase__ = ''
UpperCamelCase__ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCamelCase__ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCamelCase__ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 65 | 0 |
"""simple docstring"""
from __future__ import annotations
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = text, pattern
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = len(__A ), len(__A )
def __lowerCAmelCase ( self , __A ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def __lowerCAmelCase ( self , __A ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def __lowerCAmelCase ( self ) -> list[int]:
# searches pattern in text and returns index positions
lowerCAmelCase_ :List[str] = []
for i in range(self.textLen - self.patLen + 1 ):
lowerCAmelCase_ :Any = self.mismatch_in_text(__A )
if mismatch_index == -1:
positions.append(__A )
else:
lowerCAmelCase_ :int = self.match_in_pattern(self.text[mismatch_index] )
lowerCAmelCase_ :Any = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
__UpperCAmelCase = 'ABAABA'
__UpperCAmelCase = 'AB'
__UpperCAmelCase = BoyerMooreSearch(text, pattern)
__UpperCAmelCase = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
| 84 | import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowerCAmelCase_ ( __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(__A, __A )
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ = emb.weight.shape
UpperCAmelCase__ = nn.Linear(__A, __A, bias=__A )
UpperCAmelCase__ = emb.weight.data
return lin_layer
def lowerCAmelCase_ ( __A, __A="facebook/mbart-large-en-ro", __A=False, __A=False ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ = torch.load(__A, map_location="cpu" )["model"]
remove_ignore_keys_(__A )
UpperCAmelCase__ = state_dict["encoder.embed_tokens.weight"].shape[0]
UpperCAmelCase__ = MBartConfig.from_pretrained(__A, vocab_size=__A )
if mbart_aa and finetuned:
UpperCAmelCase__ = "relu"
UpperCAmelCase__ = state_dict["decoder.embed_tokens.weight"]
UpperCAmelCase__ = MBartForConditionalGeneration(__A )
model.model.load_state_dict(__A )
if finetuned:
UpperCAmelCase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 65 | 0 |
'''simple docstring'''
import argparse
import os
import re
_SCREAMING_SNAKE_CASE : List[Any] = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
def UpperCamelCase_( snake_case : str , snake_case : bool = False ):
'''simple docstring'''
with open(snake_case , "r" , encoding="utf-8" ) as f:
snake_case_ = f.read()
snake_case_ = content.split("\n" )
snake_case_ = []
snake_case_ = 0
while line_idx < len(snake_case ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
snake_case_ = len(re.search(r"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
snake_case_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
snake_case_ = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
snake_case_ = sorted(snake_case , key=lambda snake_case : _re_identifier.search(snake_case ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.write("\n".join(snake_case ) )
elif "\n".join(snake_case ) != content:
return True
def UpperCamelCase_( snake_case : bool = False ):
'''simple docstring'''
snake_case_ = [os.path.join(snake_case , snake_case ) for f in os.listdir(snake_case ) if f.endswith(".py" )]
snake_case_ = [sort_auto_mapping(snake_case , overwrite=snake_case ) for fname in fnames]
if not overwrite and any(snake_case ):
snake_case_ = [f for f, d in zip(snake_case , snake_case ) if d]
raise ValueError(
f'The following files have auto mappings that need sorting: {", ".join(snake_case )}. Run `make style` to fix'
" this." )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
_SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 85 | from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
UpperCamelCase__ = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowerCAmelCase_ ( __A, __A=None ) -> Dict:
'''simple docstring'''
require_version(deps[pkg], __A )
| 65 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
# Module logger; referenced by the pipeline's prompt-truncation warning below.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example injected into the pipeline's __call__ docstring via
# `replace_example_docstring`.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
        >>> pipe_prior.to("cuda")

        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> negative_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
        >>> pipe.to("cuda")

        >>> image = pipe(
        ...     prompt,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=negative_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ... ).images

        >>> image[0].save("cat.png")
        ```
"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
__lowerCAmelCase : Dict = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
__lowerCAmelCase : List[str] = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class A__ ( DiffusionPipeline ):
    """Text-to-image pipeline for Kandinsky 2.1.

    Reconstructed from heavily obfuscated code: every method signature had
    duplicated placeholder parameter names (SyntaxErrors) and assignment
    targets were mangled while use-sites kept the real names; the real names
    were restored from those use-sites.

    Args:
        text_encoder: multilingual CLIP text encoder.
        tokenizer: XLM-Roberta tokenizer for the text encoder.
        unet: conditional U-Net denoiser.
        scheduler: DDIM/DDPM noise scheduler.
        movq: MoVQ image autoencoder used to decode latents.
    """

    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        # Spatial down-scaling factor of the MoVQ autoencoder.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw (or validate user-supplied) initial latents, scaled by the scheduler's init sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        """Encode `prompt` (and, for CFG, the negative prompt) into embeddings, hidden states and masks."""
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding='max_length',
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors='pt',
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f" {self.tokenizer.model_max_length} tokens: {removed_text}" )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask )

        # One copy of the conditioning per generated image.
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}." )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    ' the batch size of `prompt`.' )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding='max_length',
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors='pt',
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1 )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all sub-models to CPU via accelerate, moving them to GPU only while used."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Hook-based whole-model CPU offload; requires accelerate >= 0.17.0."""
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # NOTE(review): this pipeline does not register a `safety_checker` module in
        # __init__, so this guard looks copied from another pipeline — confirm.
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device on which the unet actually executes (accounts for accelerate hooks)."""
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Generate images for `prompt` conditioned on prior image embeddings.

        Returns an `ImagePipelineOutput` (or a plain tuple when `return_dict=False`).
        """
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                # The unet predicts noise and variance stacked channel-wise.
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            # Map from [-1, 1] to [0, 1] and to channels-last numpy.
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
# Timestamped INFO-level logging so the progress messages emitted during
# binarization are visible on the console.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Module-level logger used by the entry point below.
UpperCamelCase__ = logging.getLogger(__name__)
def lowerCAmelCase_() -> None:
    """Tokenize a text corpus line by line and pickle the resulting id arrays.

    Each line is wrapped in the tokenizer's BOS/SEP markers, encoded, and the
    full list of per-line id arrays is shuffled and dumped to
    `<dump_file>.<tokenizer_name>.pickle`. (Reconstructed: every assignment
    target had been mangled to a placeholder while the use-sites kept the real
    names; `np.uintaa`/`np.intaa` restored to `np.uint16`/`np.int32`.)
    """
    import argparse  # local import: the top-of-file import was lost to line-merge garbling

    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data." )
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use." )
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix." )
    args = parser.parse_args()

    logger.info(f"""Loading Tokenizer ({args.tokenizer_name})""" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"""Loading text from {args.file_path}""" )
    with open(args.file_path, "r", encoding="utf8" ) as fp:
        data = fp.readlines()

    logger.info("Start encoding" )
    logger.info(f"""{len(data )} examples to process.""" )
    rslt = []
    count = 0
    interval = 10_000
    start = time.time()
    for text in data:
        line = f"""{bos} {text.strip()} {sep}"""
        token_ids = tokenizer.encode(line, add_special_tokens=False )
        rslt.append(token_ids )
        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f"""{count} examples processed. - {(end-start):.2f}s/{interval}expl""" )
            start = time.time()
    logger.info("Finished binarization" )
    logger.info(f"""{len(data )} examples processed.""" )

    dp_file = f"""{args.dump_file}.{args.tokenizer_name}.pickle"""
    vocab_size = tokenizer.vocab_size
    # uint16 halves the disk footprint when every id fits in 16 bits.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f"""Dump to {dp_file}""" )
    with open(dp_file, "wb" ) as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL )


# Conventional entry-point name used by the __main__ guard.
main = lowerCAmelCase_

if __name__ == "__main__":
    main()
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def lowercase_(config_name: str, save_dir: str, **config_kwargs):
    """Create a randomly initialized seq2seq model from `config_name` and save it.

    Saves the model weights and the matching tokenizer into `save_dir` and
    returns the model. (The previous signature named all three parameters
    identically, which is a SyntaxError.)
    """
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeqaSeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


# Backward-compatible name used by the fire CLI entry point below.
save_randomly_initialized_version = lowercase_

if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
from manim import *
class A ( UpperCAmelCase_ ):
    # NOTE(review): the base class `UpperCAmelCase_` is undefined in this file —
    # presumably manim's `Scene`; confirm before running.
    def lowercase_ (self : Union[str, Any] ) -> List[str]:
        """Animate model layers shuttling between CPU and GPU during offloaded inference.

        NOTE(review): many right-hand-side names below (``mem``, ``fill``, ``model_base``,
        ``a``, …) are never assigned — the assignment targets appear to have been mangled
        to ``UpperCAmelCase__``/``__UpperCAmelCase``; the original variable names must be
        restored before this scene can render.
        """
        # --- CPU box: two 6-cell columns plus a label -----------------------
        UpperCAmelCase__ = Rectangle(height=0.5 , width=0.5 )
        UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        UpperCAmelCase__ = Rectangle(height=0.25 , width=0.25 )
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("CPU" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__UpperCAmelCase )
        # --- GPU box --------------------------------------------------------
        UpperCAmelCase__ = [mem.copy() for i in range(4 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("GPU" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(__UpperCAmelCase )
        # --- Model as a row of 6 layer cells --------------------------------
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("Model" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(__UpperCAmelCase )
        # Filled overlays: one per model layer, mirrored inside the CPU column.
        UpperCAmelCase__ = []
        UpperCAmelCase__ = []
        for i, rect in enumerate(__UpperCAmelCase ):
            UpperCAmelCase__ = fill.copy().set_fill(__UpperCAmelCase , opacity=0.8 )
            target.move_to(__UpperCAmelCase )
            model_arr.append(__UpperCAmelCase )
            UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase )
        # --- Disk box -------------------------------------------------------
        UpperCAmelCase__ = [meta_mem.copy() for i in range(6 )]
        UpperCAmelCase__ = [meta_mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("Disk" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        disk.move_to([-4, -1.25, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        # --- Legend ---------------------------------------------------------
        UpperCAmelCase__ = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        UpperCAmelCase__ = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase__ = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__UpperCAmelCase )
        # --- Narration and the moving "input" token -------------------------
        UpperCAmelCase__ = MarkupText(
            f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase ) )
        UpperCAmelCase__ = Square(0.3 )
        input.set_fill(__UpperCAmelCase , opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] , __UpperCAmelCase , buff=0.5 )
        self.play(Write(__UpperCAmelCase ) )
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=__UpperCAmelCase , buff=0.02 )
        self.play(MoveToTarget(__UpperCAmelCase ) )
        self.play(FadeOut(__UpperCAmelCase ) )
        UpperCAmelCase__ = Arrow(start=__UpperCAmelCase , end=__UpperCAmelCase , color=__UpperCAmelCase , buff=0.5 )
        a.next_to(model_arr[0].get_left() , __UpperCAmelCase , buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        UpperCAmelCase__ = MarkupText(
            f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) )
        # Shared kwargs for every Circumscribe highlight below.
        UpperCAmelCase__ = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(__UpperCAmelCase ) , Circumscribe(model_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        UpperCAmelCase__ = a.copy()
        # Walk the input across all 6 layers, swapping weights CPU<->GPU.
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 , __UpperCAmelCase , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            UpperCAmelCase__ = AnimationGroup(
                FadeOut(__UpperCAmelCase , run_time=0.5 ) , MoveToTarget(__UpperCAmelCase , run_time=0.5 ) , FadeIn(__UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
            self.play(__UpperCAmelCase )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    UpperCAmelCase__ = 0.7
                self.play(
                    Circumscribe(model_arr[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
            else:
                # Last layer: park the weights back on the CPU column.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        UpperCAmelCase__ = a_c
        UpperCAmelCase__ = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
        self.play(
            FadeOut(__UpperCAmelCase ) , FadeOut(__UpperCAmelCase , run_time=0.5 ) , )
        UpperCAmelCase__ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) , MoveToTarget(__UpperCAmelCase ) )
        self.wait()
| 65 | 0 |
def a__(input_a, input_b):
    """XNOR gate: return 1 when both inputs are equal, else 0.

    (The previous version named both parameters identically — a SyntaxError —
    and compared a value with itself, which would always return 1.)
    """
    return 1 if input_a == input_b else 0


# Public name used by the demo and the self-test below.
xnor_gate = a__


def test_xnor_gate():
    """Exhaustively check the two-input truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
from scipy.special import comb # type: ignore
class A:
    """A Bézier curve defined by a list of 2D control points.

    The curve's degree is ``len(list_of_points) - 1``; degree 1 is a straight
    line. (Fixes: the three methods were all named ``lowercase_`` — each
    shadowing the previous — while their bodies called ``basis_function`` /
    ``bezier_curve_function``; and the basis used the time parameter instead of
    the index ``i`` in ``comb``.)
    """

    def __init__(self, list_of_points: list[tuple[float, float]]) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Return the Bernstein basis polynomials evaluated at time ``t``."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Return the (x, y) point on the curve at time ``t``."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis[i] * self.list_of_points[i][0]
            y += basis[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plot the curve and its control points with matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


# Backward-compatible alias: the __main__ demo refers to `BezierCurve`.
BezierCurve = A
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Visual demos for degrees 1-3 (each opens a matplotlib window).
    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """Download the BLIP demo image and return it as a normalized (1, 3, H, W) tensor.

    (Restores the name its caller uses and the duplicated-parameter signature,
    which was a SyntaxError.)
    """
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            # CLIP normalization statistics.
            transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073), (0.26_862_954, 0.26_130_258, 0.27_577_711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    """Map an original BLIP state-dict key to its Hugging Face equivalent.

    (Fixes: each `re.sub` result was assigned to a throwaway name instead of
    back to `key`, so the function returned its input unchanged; the name is
    restored to the one the conversion loop calls.)
    """
    if "visual_encoder" in key:
        key = re.sub('visual_encoder*', 'vision_model.encoder', key)
    if "blocks" in key:
        key = re.sub(r'blocks', 'layers', key)
    if "attn" in key:
        key = re.sub(r'attn', 'self_attn', key)
    if "norm1" in key:
        key = re.sub(r'norm1', 'layer_norm1', key)
    if "norm2" in key:
        key = re.sub(r'norm2', 'layer_norm2', key)
    if "encoder.norm" in key:
        key = re.sub(r'encoder.norm', 'post_layernorm', key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r'encoder.patch_embed.proj', 'embeddings.patch_embedding', key)
    if "encoder.pos_embed" in key:
        key = re.sub(r'encoder.pos_embed', 'embeddings.position_embedding', key)
    if "encoder.cls_token" in key:
        key = re.sub(r'encoder.cls_token', 'embeddings.class_embedding', key)
    if "self_attn" in key:
        key = re.sub(r'self_attn.proj', 'self_attn.projection', key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Convert the original BLIP captioning, VQA and ITM checkpoints to HF format.

    Downloads the three official BLIP checkpoints, renames their state-dict keys
    with `rename_key`, sanity-checks each converted model against known outputs,
    and saves them under `pytorch_dump_folder_path` (+ '_vqa' / '_itm' suffixes)
    when a path is given. (Restores the name the __main__ guard calls and the
    duplicated-parameter signature, which was a SyntaxError.)
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit='base')
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device='cpu')
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    input_ids = tokenizer(['a picture of']).input_ids

    # Sanity check the captioning model against known token outputs.
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit='base')
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ['How many dogs are in this image?']
    question_input_ids = tokenizer(question, return_tensors='pt').input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"

    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa')

    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit='base')
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ['A picture of a woman with a dog sitting in a beach']
    question_input_ids = tokenizer(
        question,
        return_tensors='pt',
        padding='max_length',
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2_110_687_494_277_954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45_698_845_386_505_127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()

    # NOTE(review): the old call passed a non-existent `args.checkpoint_path`;
    # the parser only defines the two options above.
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class A(unittest.TestCase):
    """Smoke tests for the Tatoeba -> Marian checkpoint converter.

    The skip guard must reference ``DEFAULT_REPO`` (imported above); the
    previous code passed an undefined name to ``os.path.exists``.  The three
    members were also all named ``lowercase_`` (each shadowing the previous
    one) while the test bodies call ``self.resolver`` — names restored so the
    property and both tests are actually reachable and discoverable.
    """

    @cached_property
    def resolver(self):
        """Build one converter per test class, writing into a temp dir."""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        """Converting a single pair should complete without raising."""
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        """A dry-run model card must report the resolved long pair name."""
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 65 | 0 |
import numpy as np
from transformers import Pipeline
def lowerCamelCase_(UpperCamelCase__: "np.ndarray") -> "np.ndarray":
    """Numerically stable softmax over the last axis of *UpperCamelCase__*.

    Subtracting the per-row maximum before exponentiating avoids overflow.
    ``keepdims=True`` is required so both reductions broadcast back against
    the input — the previous code passed the input array itself as
    ``keepdims``, which is wrong (and raises for multi-element arrays).
    """
    maxes = np.max(UpperCamelCase__, axis=-1, keepdims=True)
    shifted_exp = np.exp(UpperCamelCase__ - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class __lowerCAmelCase ( Pipeline ):
    """Text-pair classification pipeline.

    Classifies a (text, second_text) pair with the wrapped sequence
    classification model and returns the best label, its probability and the
    raw logits.  The hook names below (``_sanitize_parameters``,
    ``preprocess``, ``_forward``, ``postprocess``) are mandated by the
    ``transformers.Pipeline`` API — the previous code had them all renamed to
    ``lowercase_`` (and an undefined ``__magic_name__`` base), so the pipeline
    could never dispatch.
    """

    def _sanitize_parameters(self, **kwargs):
        """Route the optional ``second_text`` kwarg to the preprocess step."""
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        """Tokenize the pair into framework-specific tensors."""
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        """Run the model on the tokenized inputs."""
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        """Convert logits to a {label, score, logits} dict."""
        logits = model_outputs.logits[0].numpy()
        # Module-level numerically-stable softmax helper defined above.
        probabilities = lowerCamelCase_(logits)
        best_class = np.argmax(probabilities)
        # `id2label` is the standard transformers config mapping (the previous
        # `idalabel` attribute does not exist on PretrainedConfig).
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 90 | import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Demonstration of the classic fuzzy-set operations with scikit-fuzzy.
    # The previous version assigned every value to the same mangled name and
    # then referenced the original names (X, young, middle_aged, ...), so it
    # crashed with NameError on the first reference — names restored.

    # Universe of discourse (e.g. ages 0..75).
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Two fuzzy sets defined by triangular membership functions
    # (trapmf(), gbellmf(), gaussmf(), etc. would work equally well).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Constant membership grades used by the bounded operators below.
    one = np.ones(75)
    zero = np.zeros((75,))

    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # Plot each input set and each operation result in a 4x3 grid.
    from matplotlib import pyplot as plt

    plots = [
        (young, "Young"),
        (middle_aged, "Middle aged"),
        (union, "union"),
        (intersection, "intersection"),
        (complement_a, "complement_a"),
        (difference, "difference a/b"),
        (alg_sum, "alg_sum"),
        (alg_product, "alg_product"),
        (bdd_sum, "bdd_sum"),
        (bdd_difference, "bdd_difference"),
    ]
    plt.figure()
    for index, (membership, title) in enumerate(plots, start=1):
        plt.subplot(4, 3, index)
        plt.plot(X, membership)
        plt.title(title)
        plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 65 | 0 |
"""simple docstring"""
from manim import *
class lowerCAmelCase__ ( UpperCAmelCase__ ):
    """Manim scene drawing a CPU/GPU/Model memory diagram for the
    "load an empty model skeleton" animation.

    NOTE(review): this block is not runnable as written — the base class
    ``UpperCAmelCase__`` is undefined in this module (a manim ``Scene``
    subclass is presumably intended), every local is bound to
    ``SCREAMING_SNAKE_CASE_`` (each assignment shadowing the previous one),
    and the bodies then reference the undefined name ``lowercase_``.  The
    original distinct identifiers must be restored before use; the code is
    left byte-identical here.
    """

    def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        """Build the CPU/GPU/Model groups, the legend, then animate the
        empty-model rectangles moving into CPU memory."""
        # --- CPU: two columns of six memory cells plus a "CPU" label ---
        SCREAMING_SNAKE_CASE_ : List[str] = Rectangle(height=0.5 , width=0.5)
        SCREAMING_SNAKE_CASE_ : Dict = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
        SCREAMING_SNAKE_CASE_ : Any = [mem.copy() for i in range(6)]
        SCREAMING_SNAKE_CASE_ : Optional[int] = [mem.copy() for i in range(6)]
        SCREAMING_SNAKE_CASE_ : Optional[int] = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
        SCREAMING_SNAKE_CASE_ : Optional[int] = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
        SCREAMING_SNAKE_CASE_ : List[Any] = VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
        SCREAMING_SNAKE_CASE_ : List[str] = Text('''CPU''' , font_size=24)
        SCREAMING_SNAKE_CASE_ : Optional[Any] = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(lowercase_)
        # --- GPU: a single memory cell plus a "GPU" label, left of center ---
        SCREAMING_SNAKE_CASE_ : List[str] = [mem.copy() for i in range(1)]
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
        SCREAMING_SNAKE_CASE_ : List[str] = Text('''GPU''' , font_size=24)
        SCREAMING_SNAKE_CASE_ : Optional[int] = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
        gpu.align_to(lowercase_ , lowercase_)
        gpu.set_x(gpu.get_x() - 1)
        self.add(lowercase_)
        # --- Model: six memory cells plus a "Model" label on the right ---
        SCREAMING_SNAKE_CASE_ : Optional[Any] = [mem.copy() for i in range(6)]
        SCREAMING_SNAKE_CASE_ : Tuple = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
        SCREAMING_SNAKE_CASE_ : Optional[int] = Text('''Model''' , font_size=24)
        SCREAMING_SNAKE_CASE_ : Tuple = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1) , )
        # --- Caption and legend (key) explaining the empty-model color ---
        SCREAMING_SNAKE_CASE_ : Optional[Any] = MarkupText(
            F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
        SCREAMING_SNAKE_CASE_ : Dict = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = MarkupText(
            F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0])
        step_a.move_to([2, 2, 0])
        self.play(Write(lowercase_ , run_time=2.5) , Write(lowercase_) , Write(lowercase_))
        self.add(lowercase_)
        # --- Animate one target rectangle per model cell moving into CPU ---
        SCREAMING_SNAKE_CASE_ : Any = []
        SCREAMING_SNAKE_CASE_ : Any = []
        SCREAMING_SNAKE_CASE_ : List[str] = []
        for i, rect in enumerate(lowercase_):
            SCREAMING_SNAKE_CASE_ : Any = Rectangle(height=0.46 , width=0.46).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
            cpu_target.move_to(lowercase_)
            cpu_target.generate_target()
            SCREAMING_SNAKE_CASE_ : Optional[int] = 0.46 / 4
            SCREAMING_SNAKE_CASE_ : str = 0.46 / 3
            # Lay the targets out in a small grid inside the CPU's left column.
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target , direction=lowercase_ , buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowercase_ , buff=0.0)
            cpu_targs.append(lowercase_)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(lowercase_))
            second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
        self.play(*lowercase_)
        self.play(*lowercase_)
        self.wait()
| 91 | from __future__ import annotations
from collections import deque
class A:
    """Aho-Corasick automaton for simultaneous multi-keyword string search.

    The automaton is a trie of all keywords augmented with failure links,
    allowing every occurrence of every keyword in a text to be found in a
    single left-to-right pass.  The previous version was unimportable: all
    methods were renamed to ``lowercase_`` while the bodies call
    ``find_next_state``/``add_keyword``/``set_fail_transitions``, and the
    signatures used duplicate ``__UpperCAmelCase`` parameter names (a
    SyntaxError) — the original names are restored here.
    """

    def __init__(self, keywords: list[str]) -> None:
        """Build the trie for *keywords* and compute the failure links.

        Node 0 is the root; each node is a dict with the edge character
        (``value``), child indices (``next_states``), failure-link target
        (``fail_state``) and the keywords matched at that node (``output``).
        """
        self.adlist: list[dict] = []
        self.adlist.append({"value": "", "next_states": [], "fail_state": 0, "output": []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        """Return the child of *current_state* reached by *char*, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        """Insert *keyword* into the trie, creating nodes as needed."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                # No edge for this character yet: append a fresh node.
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        # The node for the keyword's last character reports the keyword.
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        """BFS over the trie to compute failure links and merge outputs."""
        q: deque[int] = deque()
        # Depth-1 nodes fail back to the root.
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                # Follow the parent's failure chain until a state with a
                # matching edge (or the root) is found.
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                # Matches visible via the failure link are matches here too.
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return ``{keyword: [start indices]}`` for all matches in *string*."""
        result: dict[str, list[int]] = {}
        current_state = 0
        for i in range(len(string)):
            # On a mismatch, fall back along failure links (stop at root).
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    # Start index of this occurrence of `key`.
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
    import doctest

    # Run any usage examples embedded in this module's docstrings.
    doctest.testmod()
| 65 | 0 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase__ = logging.getLogger()
def _a ( ):
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("-f" )
__lowerCAmelCase = parser.parse_args()
return args.f
class a__(TestCasePlus):
    """End-to-end tests for the DeeBERT example scripts.

    Restorations: the base class was the undefined name ``snake_case__``
    (``TestCasePlus`` is imported above for exactly this purpose); all three
    methods shared one name while the test body calls ``self.run_and_check``;
    and ``patch.object`` must patch ``sys.argv``, not the args list itself.
    """

    def setup(self) -> None:
        """Mirror script logging to stdout so failures are visible."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        """Run run_glue_deebert.main() with *args* and check eval metrics."""
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        """Train a two-stage DeeBERT model, then evaluate the highway exits."""
        train_args = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(train_args)
        eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(eval_args)
        entropy_eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(entropy_eval_args)
| 92 | import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class A ( UpperCAmelCase_ ):
    """SpeechT5-style feature extractor: pads/normalizes raw waveforms for the
    encoder and extracts log-mel filter-bank spectrograms for target audio.

    NOTE(review): this block was mechanically renamed and is not importable as
    written — every parameter in the signatures below is literally named
    ``__UpperCAmelCase`` (duplicate argument names are a SyntaxError in
    Python) while the bodies still reference the original parameter names
    (``do_normalize``, ``hop_length``, ...), and the ``UpperCAmelCase__ = ...``
    statements were presumably ``self.<name> = ...`` attribute assignments
    (later code reads ``self.sample_size`` etc.).  The original identifiers
    must be restored before use; the code is left byte-identical here.
    """

    # Presumably the `model_input_names` attribute of the
    # SequenceFeatureExtractor API — the attribute name itself was mangled.
    __UpperCAmelCase : int = ['input_values', 'attention_mask']

    def __init__(self : Any , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 1_6_0_0_0 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : bool = False , __UpperCAmelCase : int = 8_0 , __UpperCAmelCase : int = 1_6 , __UpperCAmelCase : int = 6_4 , __UpperCAmelCase : str = "hann_window" , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : float = 8_0 , __UpperCAmelCase : float = 7_6_0_0 , __UpperCAmelCase : float = 1E-10 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : bool = True , **__UpperCAmelCase : Any , ) -> str:
        """Store the spectrogram configuration and precompute the STFT window
        and mel filter bank (win/hop lengths are given in milliseconds)."""
        super().__init__(feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , **__UpperCAmelCase )
        UpperCAmelCase__ = do_normalize
        UpperCAmelCase__ = return_attention_mask
        UpperCAmelCase__ = num_mel_bins
        UpperCAmelCase__ = hop_length
        UpperCAmelCase__ = win_length
        UpperCAmelCase__ = win_function
        UpperCAmelCase__ = frame_signal_scale
        UpperCAmelCase__ = fmin
        UpperCAmelCase__ = fmax
        UpperCAmelCase__ = mel_floor
        UpperCAmelCase__ = reduction_factor
        # Window/hop sizes converted from milliseconds to samples.
        UpperCAmelCase__ = win_length * sampling_rate // 1_0_0_0
        UpperCAmelCase__ = hop_length * sampling_rate // 1_0_0_0
        UpperCAmelCase__ = optimal_fft_length(self.sample_size )
        UpperCAmelCase__ = (self.n_fft // 2) + 1
        UpperCAmelCase__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCAmelCase )
        UpperCAmelCase__ = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
        # Both arguments are deprecated; warn if a non-default value is given.
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , __UpperCAmelCase , )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , __UpperCAmelCase , )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def lowercase_ (__UpperCAmelCase : List[np.ndarray] , __UpperCAmelCase : List[np.ndarray] , __UpperCAmelCase : float = 0.0 ) -> List[np.ndarray]:
        """Normalize each vector to zero mean / unit variance over its valid
        (unpadded) length; padded positions are reset to the padding value."""
        if attention_mask is not None:
            UpperCAmelCase__ = np.array(__UpperCAmelCase , np.intaa )
            UpperCAmelCase__ = []
            for vector, length in zip(__UpperCAmelCase , attention_mask.sum(-1 ) ):
                # Stats are computed over the first `length` samples only.
                UpperCAmelCase__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    UpperCAmelCase__ = padding_value
                normed_input_values.append(__UpperCAmelCase )
        else:
            UpperCAmelCase__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values

    def lowercase_ (self : Optional[int] , __UpperCAmelCase : np.ndarray , ) -> np.ndarray:
        """Compute a (frames, num_mel_bins) log10 mel spectrogram for one
        waveform using the precomputed window and filter bank."""
        UpperCAmelCase__ = spectrogram(
            __UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
        return log_mel_spec.T

    def __call__(self : Any , __UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , __UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , __UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : Optional[int] = None , **__UpperCAmelCase : str , ) -> BatchFeature:
        """Featurize `audio` (waveform inputs) and/or `audio_target`
        (spectrogram labels); when both are given, the target features are
        attached to the inputs as `labels`/`decoder_attention_mask`."""
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values." )
        # Sanity-check the caller's sampling rate against the extractor's.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        if audio is not None:
            UpperCAmelCase__ = self._process_audio(
                __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , )
        else:
            UpperCAmelCase__ = None
        if audio_target is not None:
            UpperCAmelCase__ = self._process_audio(
                __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , )
            if inputs is None:
                return inputs_target
            else:
                # Merge target features into the input batch as labels.
                UpperCAmelCase__ = inputs_target["input_values"]
                UpperCAmelCase__ = inputs_target.get("attention_mask" )
                if decoder_attention_mask is not None:
                    UpperCAmelCase__ = decoder_attention_mask
        return inputs

    def lowercase_ (self : Optional[int] , __UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCAmelCase : bool = False , __UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , **__UpperCAmelCase : Any , ) -> BatchFeature:
        """Shared featurization path: batchify, (optionally) extract mel
        features for targets, pad, cast to float32 and normalize."""
        UpperCAmelCase__ = isinstance(__UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        UpperCAmelCase__ = is_batched_numpy or (
            isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            UpperCAmelCase__ = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for speech in speech]
        elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ):
            UpperCAmelCase__ = np.asarray(__UpperCAmelCase , dtype=np.floataa )
        elif isinstance(__UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
            UpperCAmelCase__ = speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            UpperCAmelCase__ = [speech]
        # needed to make pad() work on spectrogram inputs
        UpperCAmelCase__ = self.feature_size
        # convert into correct format for padding
        if is_target:
            UpperCAmelCase__ = [self._extract_mel_features(__UpperCAmelCase ) for waveform in speech]
            UpperCAmelCase__ = BatchFeature({"input_values": features} )
            UpperCAmelCase__ = self.num_mel_bins
        else:
            UpperCAmelCase__ = BatchFeature({"input_values": speech} )
        UpperCAmelCase__ = self.pad(
            __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
        UpperCAmelCase__ = feature_size_hack
        # convert input values to correct format
        UpperCAmelCase__ = padded_inputs["input_values"]
        if not isinstance(input_values[0] , np.ndarray ):
            UpperCAmelCase__ = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for array in input_values]
        elif (
            not isinstance(__UpperCAmelCase , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.floataa )
        ):
            UpperCAmelCase__ = [array.astype(np.floataa ) for array in input_values]
        elif isinstance(__UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
            UpperCAmelCase__ = input_values.astype(np.floataa )
        # convert attention_mask to correct format
        UpperCAmelCase__ = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            UpperCAmelCase__ = [np.asarray(__UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            # Only pass the mask when padding actually occurred.
            UpperCAmelCase__ = (
                attention_mask
                if self._get_padding_strategies(__UpperCAmelCase , max_length=__UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            UpperCAmelCase__ = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"] , attention_mask=__UpperCAmelCase , padding_value=self.padding_value )
        if return_tensors is not None:
            UpperCAmelCase__ = padded_inputs.convert_to_tensors(__UpperCAmelCase )
        return padded_inputs

    def lowercase_ (self : Tuple ) -> Dict[str, Any]:
        """Serialize the config, dropping the attributes that are derived from
        the other properties at construction time."""
        UpperCAmelCase__ = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        UpperCAmelCase__ = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
| 65 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( PipelineTesterMixin , unittest.TestCase ):
    """Fast (CPU, dummy-weights) tests for `DDIMPipeline`.

    Restorations: the first base class was the undefined name
    ``lowerCamelCase_`` — the body itself references
    ``PipelineTesterMixin.required_optional_params``, which pins the intended
    mixin; the class attributes and methods were all renamed to one
    identifier, while the mixin's API requires ``pipeline_class``/``params``/
    ``batch_params`` and the tests call ``self.get_dummy_components`` /
    ``self.get_dummy_inputs``.
    """

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the original flag name was obfuscated; `test_cpu_offload`
    # matches the mixin flags of this diffusers era — confirm against upstream.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny UNet + DDIM scheduler with a fixed seed."""
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Seeded generator + minimal call kwargs for the pipeline."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        """Two inference steps on CPU must reproduce the reference slice."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    # The following delegate to the mixin with loosened tolerances.
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow, GPU integration tests for `DDIMPipeline` against pretrained
    checkpoints.

    NOTE(review): this class has the same (obfuscated) name as the fast-test
    class above and therefore shadows it in the module namespace; the two
    should carry distinct names so both are collected.  Method and local names
    below are restored — the originals were all ``_snake_case`` with undefined
    ``__SCREAMING_SNAKE_CASE`` references.
    """

    def test_inference_cifar10(self):
        """Unconditional DDIM sampling on ddpm-cifar10-32 matches the slice."""
        model_id = "google/ddpm-cifar10-32"
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        """Sampling on ddpm-ema-bedroom-256 matches the reference slice."""
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 93 | from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Text-model output that also carries a projection of the last hidden
    state.

    Renamed from the obfuscated ``A`` (which was unreachable anyway — two
    later classes rebound the name) to ``TransformationModelOutput``, the name
    the model's ``forward`` below already returns.  The base class was the
    undefined ``UpperCAmelCase_``; ``ModelOutput`` is imported above for this
    purpose.  The four fields shared one mangled name; their types pin the
    standard output layout restored here.
    """

    # Projection of the hidden state into `config.project_dim`.
    projection_state: Optional[torch.FloatTensor] = None
    # Sequence of hidden states at the output of the last layer.
    last_hidden_state: torch.FloatTensor = None
    # Per-layer hidden states (present when output_hidden_states=True).
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer attention weights (present when output_attentions=True).
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    """XLM-Roberta config extended with a projection head description.

    Renamed from the obfuscated ``A`` (shadowed by the model class below and
    therefore unreachable) to ``RobertaSeriesConfig`` — the name the model's
    ``config_class`` already references.  The base class was the undefined
    ``UpperCAmelCase_``; ``XLMRobertaConfig`` is imported above for this
    purpose, and the parameter names are restored from the values assigned in
    the body.
    """

    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim          # width of the projection head
        self.pooler_fn = pooler_fn              # pooling strategy identifier
        self.learn_encoder = learn_encoder      # whether the encoder is trained
        self.use_attention_mask = use_attention_mask
class A(RobertaPreTrainedModel):
    """XLM-Roberta encoder with a linear projection of its output.

    Restorations: the base class was the undefined ``UpperCAmelCase_``
    (``RobertaPreTrainedModel`` is imported above); the class attributes and
    ``self.*`` assignments were mangled — ``base_model_prefix = "roberta"`` is
    required for ``self.base_model`` (used in ``forward``) to resolve to the
    ``self.roberta`` submodule; and the forward method had been renamed away
    from ``forward``, which ``nn.Module.__call__`` requires.
    """

    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        # Optional pre-transformation head applied to the penultimate layer.
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ):
        """Encode the inputs and return a `TransformationModelOutput` whose
        `projection_state` is the projected (pre- or post-) hidden state."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            # The pre-transformation path needs the penultimate layer, so
            # hidden states must be materialized in that case.
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
| 65 | 0 |
import qiskit
def __lowerCamelCase ( UpperCAmelCase_ : int = 2 ):
"""simple docstring"""
a :Tuple = qubits
# Using Aer's simulator
a :Union[str, Any] = qiskit.Aer.get_backend('''aer_simulator''' )
# Creating a Quantum Circuit acting on the q register
a :str = qiskit.QuantumCircuit(UpperCAmelCase_ , UpperCAmelCase_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , UpperCAmelCase_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , UpperCAmelCase_ )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(UpperCAmelCase_ ) ) , list(range(UpperCAmelCase_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
a :Union[str, Any] = qiskit.execute(UpperCAmelCase_ , UpperCAmelCase_ , shots=1000 )
return job.result().get_counts(UpperCAmelCase_ )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 94 | import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_50, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class A ( unittest.TestCase ):
def lowercase_ (self : int ) -> Optional[Any]:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=__UpperCAmelCase , )
assert hasattr(self , "env" )
def lowercase_ (self : List[Any] , __UpperCAmelCase : Optional[int]=1 ) -> Dict:
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=__UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCAmelCase , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
TrainingJobAnalytics(__UpperCAmelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
def lowercase_ (self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
UpperCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __UpperCAmelCase )
| 65 | 0 |
import os
from datetime import datetime as dt
from github import Github
UpperCAmelCase : str = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def _A ( ):
"""simple docstring"""
a__ : Tuple =Github(os.environ["GITHUB_TOKEN"] )
a__ : int =g.get_repo("huggingface/diffusers" )
a__ : int =repo.get_issues(state="open" )
for issue in open_issues:
a__ : int =sorted(issue.get_comments() , key=lambda SCREAMING_SNAKE_CASE : i.created_at , reverse=SCREAMING_SNAKE_CASE )
a__ : Tuple =comments[0] if len(SCREAMING_SNAKE_CASE ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 95 | import math
import random
def lowerCAmelCase_ ( __A, __A = False ) -> float:
'''simple docstring'''
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
UpperCamelCase__ = 0.0_2
def lowerCAmelCase_ ( __A, __A ) -> float:
'''simple docstring'''
UpperCAmelCase__ = float(2 * (random.randint(1, 100 )) - 1 )
for _ in range(__A ):
# Forward propagation
UpperCAmelCase__ = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
UpperCAmelCase__ = (expected / 100) - layer_a
# Error delta
UpperCAmelCase__ = layer_1_error * sigmoid_function(__A, __A )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = int(input('Expected value: '))
UpperCamelCase__ = int(input('Number of propagations: '))
print(forward_propagation(expected, number_propagations))
| 65 | 0 |
"""simple docstring"""
import random
def _snake_case ( lowercase__ , lowercase__ , lowercase__ = False ):
_lowerCamelCase : dict = {i: [] for i in range(lowercase__ )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(lowercase__ )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(lowercase__ ):
for j in range(i + 1 , lowercase__ ):
if random.random() < probability:
graph[i].append(lowercase__ )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(lowercase__ )
return graph
def _snake_case ( lowercase__ ):
return {
i: [j for j in range(lowercase__ ) if i != j] for i in range(lowercase__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 | from __future__ import annotations
class A :
def __init__(self : Union[str, Any] , __UpperCAmelCase : list[list[int]] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = TypeError(
"Matrices must be formed from a list of zero or more lists containing at "
"least one and the same number of values, each of which must be of type "
"int or float." )
if len(__UpperCAmelCase ) != 0:
UpperCAmelCase__ = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(__UpperCAmelCase ) != cols:
raise error
for value in row:
if not isinstance(__UpperCAmelCase , (int, float) ):
raise error
UpperCAmelCase__ = rows
else:
UpperCAmelCase__ = []
def lowercase_ (self : Any ) -> list[list[int]]:
"""simple docstring"""
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def lowercase_ (self : Any ) -> int:
"""simple docstring"""
return len(self.rows )
@property
def lowercase_ (self : Union[str, Any] ) -> int:
"""simple docstring"""
return len(self.rows[0] )
@property
def lowercase_ (self : List[Any] ) -> tuple[int, int]:
"""simple docstring"""
return (self.num_rows, self.num_columns)
@property
def lowercase_ (self : Tuple ) -> bool:
"""simple docstring"""
return self.order[0] == self.order[1]
def lowercase_ (self : Any ) -> Matrix:
"""simple docstring"""
UpperCAmelCase__ = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(__UpperCAmelCase )
def lowercase_ (self : int ) -> int:
"""simple docstring"""
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def lowercase_ (self : Tuple ) -> bool:
"""simple docstring"""
return bool(self.determinant() )
def lowercase_ (self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
"""simple docstring"""
UpperCAmelCase__ = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(__UpperCAmelCase ).determinant()
def lowercase_ (self : int , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
"""simple docstring"""
if (row + column) % 2 == 0:
return self.get_minor(__UpperCAmelCase , __UpperCAmelCase )
return -1 * self.get_minor(__UpperCAmelCase , __UpperCAmelCase )
def lowercase_ (self : Union[str, Any] ) -> Matrix:
"""simple docstring"""
return Matrix(
[
[self.get_minor(__UpperCAmelCase , __UpperCAmelCase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def lowercase_ (self : List[str] ) -> Matrix:
"""simple docstring"""
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def lowercase_ (self : Optional[Any] ) -> Matrix:
"""simple docstring"""
UpperCAmelCase__ = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(__UpperCAmelCase )
def lowercase_ (self : List[Any] ) -> Matrix:
"""simple docstring"""
UpperCAmelCase__ = self.determinant()
if not determinant:
raise TypeError("Only matrices with a non-zero determinant have an inverse" )
return self.adjugate() * (1 / determinant)
def __repr__(self : Dict ) -> str:
"""simple docstring"""
return str(self.rows )
def __str__(self : Optional[Any] ) -> str:
"""simple docstring"""
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(__UpperCAmelCase ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
def lowercase_ (self : Optional[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int | None = None ) -> None:
"""simple docstring"""
UpperCAmelCase__ = TypeError("Row must be a list containing all ints and/or floats" )
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise type_error
for value in row:
if not isinstance(__UpperCAmelCase , (int, float) ):
raise type_error
if len(__UpperCAmelCase ) != self.num_columns:
raise ValueError(
"Row must be equal in length to the other rows in the matrix" )
if position is None:
self.rows.append(__UpperCAmelCase )
else:
UpperCAmelCase__ = self.rows[0:position] + [row] + self.rows[position:]
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int | None = None ) -> None:
"""simple docstring"""
UpperCAmelCase__ = TypeError(
"Column must be a list containing all ints and/or floats" )
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise type_error
for value in column:
if not isinstance(__UpperCAmelCase , (int, float) ):
raise type_error
if len(__UpperCAmelCase ) != self.num_rows:
raise ValueError(
"Column must be equal in length to the other columns in the matrix" )
if position is None:
UpperCAmelCase__ = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
UpperCAmelCase__ = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__(self : Any , __UpperCAmelCase : object ) -> bool:
"""simple docstring"""
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return NotImplemented
return self.rows == other.rows
def __ne__(self : int , __UpperCAmelCase : object ) -> bool:
"""simple docstring"""
return not self == other
def __neg__(self : Dict ) -> Matrix:
"""simple docstring"""
return self * -1
def __add__(self : Dict , __UpperCAmelCase : Matrix ) -> Matrix:
"""simple docstring"""
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__(self : Optional[Any] , __UpperCAmelCase : Matrix ) -> Matrix:
"""simple docstring"""
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__(self : Tuple , __UpperCAmelCase : Matrix | int | float ) -> Matrix:
"""simple docstring"""
if isinstance(__UpperCAmelCase , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
if self.num_columns != other.num_rows:
raise ValueError(
"The number of columns in the first matrix must "
"be equal to the number of rows in the second" )
return Matrix(
[
[Matrix.dot_product(__UpperCAmelCase , __UpperCAmelCase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix" )
def __pow__(self : List[Any] , __UpperCAmelCase : int ) -> Matrix:
"""simple docstring"""
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("A Matrix can only be raised to the power of an int" )
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"Only invertable matrices can be raised to a negative power" )
UpperCAmelCase__ = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def lowercase_ (cls : Dict , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] ) -> int:
"""simple docstring"""
return sum(row[i] * column[i] for i in range(len(__UpperCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 0 |
'''simple docstring'''
class lowercase:
    """Disjoint-set (union-find) structure that also tracks per-set element
    counts and the largest set size produced by any merge.

    ``set_counts[i]`` is the initial element count of set ``i``.  Union is by
    rank; find uses path compression.
    """

    def __init__(self, UpperCamelCase_):
        """Initialise ``len(UpperCamelCase_)`` singleton sets.

        Fix: the original assigned every value below to a throwaway local
        instead of an instance attribute, so the other methods raised
        AttributeError on first use.
        """
        self.set_counts = UpperCamelCase_
        self.max_set = max(UpperCamelCase_)
        num_sets = len(UpperCamelCase_)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src, dst):
        """Union the sets containing ``src`` and ``dst``.

        Returns False when both already share a root, True otherwise, and
        updates ``max_set`` with the merged set's size.

        Fix: the original declared this method with two identically named
        parameters (a SyntaxError) and under the same name as the find method
        below, which would have shadowed it; it is exposed under the
        descriptive name its body implies.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # Attach the lower-ranked root under the higher-ranked one and
            # move the element count across; the losing root keeps count 0.
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set):
        """Return the root of ``disj_set``, compressing the path on the way.

        Fix: the original discarded the recursive result into a local
        variable and returned the *immediate* parent, so it neither
        compressed paths nor reliably returned the set's root.
        """
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]

    # Backward-compatible alias: the only method name reachable on the
    # original class resolved to the find-root method (the later duplicate
    # definition shadowed merge).
    lowerCAmelCase__ = get_parent
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger.
UpperCamelCase__ = logging.get_logger(__name__)
# NOTE(review): every constant in this section is bound to the same name
# (``UpperCamelCase__``), so each assignment shadows the previous one, and the
# identifiers the tokenizer class below actually reads (VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, logger, BPE_TOKEN_MERGES, ...) are never defined
# under those names here — presumably damaged by an automated renaming pass;
# confirm against the original module.
# On-disk file names that make up a saved vocabulary.
UpperCamelCase__ = {
    'vocab_file': 'vocab.json',
    'tokenizer_config_file': 'tokenizer_config.json',
    'merges_file': 'merges.txt',
}
# Download URL of each vocabulary file, keyed by pretrained checkpoint name.
UpperCamelCase__ = {
    'vocab_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
        ),
    },
    'tokenizer_config_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
        ),
    },
    'merges_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
        ),
    },
}
# Presumably the BPE end-of-word marker — original constant name lost; confirm.
UpperCamelCase__ = '</w>'
# Presumably the BPE continuation marker used when re-joining pieces; confirm.
UpperCamelCase__ = '@@ '
def lowerCAmelCase_ ( __A ) -> set:
    """Return the set of adjacent symbol pairs in the word ``__A``.

    ``__A`` is a word represented as a sequence of symbols (each symbol a
    variable-length string); the result contains one ``(left, right)`` tuple
    per adjacent pair.

    Fix: the original body referenced the undefined name ``word`` while the
    parameter was ``__A`` (guaranteed NameError), and the return annotation
    said ``str`` although a ``set`` is returned.
    """
    pairs = set()
    prev_char = __A[0]
    for char in __A[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
# Max model input size (in positions) keyed by pretrained checkpoint name.
UpperCamelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4}
class A ( UpperCAmelCase_ ):
    """Byte-pair-encoding (BPE) tokenizer with a JSON vocabulary and an
    optional ``merges`` file (without merges it can only decode, not encode).

    NOTE(review): the class references module-level names (VOCAB_FILES_NAMES,
    logger, BPE_TOKEN_MERGES, ...) and locals (``do_lower_case``,
    ``merges_file``, ``text``, ...) that are not defined under those
    identifiers in this file, and every method below shares the name
    ``lowercase_`` (each definition shadows the previous).  Both look like
    damage from an automated renaming pass; confirm against the original
    module before relying on this code.
    """

    __UpperCAmelCase : str = VOCAB_FILES_NAMES
    __UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase : Dict = ['input_ids', 'attention_mask']

    # NOTE(review): the parameters below all share the name ``__UpperCAmelCase``,
    # which is a SyntaxError (duplicate argument names) — kept byte-identical.
    def __init__(self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict="<s>" , __UpperCAmelCase : Tuple="<pad>" , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : int="<unk>" , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : str=None , **__UpperCAmelCase : Optional[Any] , ) -> Tuple:
        """Load the JSON vocabulary, build the reverse map, and (when a merges
        file is given) build the BPE merge-rank table and the BPE cache."""
        super().__init__(
            unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , **__UpperCAmelCase , )
        UpperCAmelCase__ = do_lower_case
        with open(__UpperCAmelCase , encoding="utf-8" ) as vocab_handle:
            UpperCAmelCase__ = json.load(__UpperCAmelCase )
        # id -> token reverse map of the loaded vocabulary.
        UpperCAmelCase__ = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            # Decode-only mode: no merge table, no cache.
            logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
            UpperCAmelCase__ = None
            UpperCAmelCase__ = None
        else:
            with open(__UpperCAmelCase , encoding="utf-8" ) as merges_handle:
                UpperCAmelCase__ = merges_handle.read().split("\n" )[:-1]
            # Each merge line's first two fields form a (left, right) pair;
            # rank = line position (lower rank = higher merge priority).
            UpperCAmelCase__ = [tuple(merge.split()[:2] ) for merge in merges]
            UpperCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
            UpperCAmelCase__ = {}

    @property
    def lowercase_ (self : List[str] ) -> int:
        """Vocabulary size (number of entries in the id -> token map)."""
        return len(self.decoder )

    def lowercase_ (self : Union[str, Any] ) -> Dict:
        """Return the full token -> id map, including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def lowercase_ (self : Dict , __UpperCAmelCase : Union[str, Any] ) -> str:
        """Apply BPE to one token: repeatedly merge the adjacent pair with the
        lowest merge rank until no rankable pair remains, then return the
        space-separated pieces (with end-of-word marker handling).  Results
        are memoised in ``self.cache``."""
        UpperCAmelCase__ = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        UpperCAmelCase__ = get_pairs(__UpperCAmelCase )
        if not pairs:
            return token
        while True:
            # Best-ranked pair present in the word; unranked pairs rank inf.
            UpperCAmelCase__ = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            UpperCAmelCase__ , UpperCAmelCase__ = bigram
            UpperCAmelCase__ = []
            UpperCAmelCase__ = 0
            while i < len(__UpperCAmelCase ):
                try:
                    UpperCAmelCase__ = word.index(__UpperCAmelCase , __UpperCAmelCase )
                except ValueError:
                    # No further occurrence of `first`: keep the tail as-is.
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    UpperCAmelCase__ = j
                if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
                    # Merge the matched (first, second) pair into one symbol.
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            UpperCAmelCase__ = tuple(__UpperCAmelCase )
            UpperCAmelCase__ = new_word
            if len(__UpperCAmelCase ) == 1:
                break
            else:
                UpperCAmelCase__ = get_pairs(__UpperCAmelCase )
        UpperCAmelCase__ = " ".join(__UpperCAmelCase )
        if word == "\n " + BPE_TOKEN_MERGES:
            UpperCAmelCase__ = "\n" + BPE_TOKEN_MERGES
        if word.endswith(__UpperCAmelCase ):
            UpperCAmelCase__ = word.replace(__UpperCAmelCase , "" )
        UpperCAmelCase__ = word.replace(" " , __UpperCAmelCase )
        UpperCAmelCase__ = word
        return word

    def lowercase_ (self : Tuple , __UpperCAmelCase : int ) -> Optional[int]:
        """Split the input text on whitespace (lower-casing first when
        configured) and BPE-encode every token.  Raises when the tokenizer was
        built without a merges file (decode-only)."""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding." )
        if self.do_lower_case:
            UpperCAmelCase__ = text.lower()
        UpperCAmelCase__ = text.split()
        UpperCAmelCase__ = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(" " ) ) )
        return split_tokens

    def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : str ) -> int:
        """Map a token string to its id, falling back to the unk-token id."""
        return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )

    def lowercase_ (self : Any , __UpperCAmelCase : int ) -> str:
        """Map an id back to its token string, falling back to the unk token."""
        UpperCAmelCase__ = self.decoder.get(__UpperCAmelCase , self.unk_token )
        return result

    def lowercase_ (self : Dict , __UpperCAmelCase : List[str] ) -> str:
        """Join tokens back into a single string, removing the continuation
        markers so BPE pieces are concatenated again."""
        UpperCAmelCase__ = " ".join(__UpperCAmelCase )
        # make sure @@ tokens are concatenated
        UpperCAmelCase__ = "".join(string.split(__UpperCAmelCase ) )
        return string

    def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
        """Write the vocabulary JSON (and, when present, the merges file in
        rank order) into ``save_directory``; returns the written paths."""
        if not os.path.isdir(__UpperCAmelCase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCAmelCase__ = os.path.join(
            __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        UpperCAmelCase__ = os.path.join(
            __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + "\n" )
        UpperCAmelCase__ = 0
        if self.bpe_ranks is None:
            # Decode-only tokenizer: no merges file to write.
            return (vocab_file,)
        with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
            # Write merges sorted by rank; warn if ranks are not consecutive.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    UpperCAmelCase__ = token_index
                writer.write(" ".join(__UpperCAmelCase ) + "\n" )
                index += 1
        return (vocab_file, merges_file)
| 65 | 0 |
"""simple docstring"""
import math
def a_(apparent_power, power_factor):
    """Return the real (active) power: ``apparent_power * power_factor``.

    Fix: the original declared both parameters as ``lowerCamelCase`` (duplicate
    argument names are a SyntaxError) while the body referenced the undefined
    names ``apparent_power``/``power_factor``; the parameters now carry the
    names the body already uses.

    Raises:
        ValueError: if ``power_factor`` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor
def a_(apparent_power, power_factor):
    """Return the reactive power: ``apparent_power * sqrt(1 - power_factor**2)``.

    Fix: the original declared both parameters as ``lowerCamelCase`` (duplicate
    argument names are a SyntaxError) while the body referenced the undefined
    names ``apparent_power``/``power_factor``; the parameters now carry the
    names the body already uses.

    Raises:
        ValueError: if ``power_factor`` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    import doctest

    doctest.testmod()
| 98 | from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A ( UpperCAmelCase_ ):
    """Dataclass output wrapper around a single ``torch.FloatTensor`` field.

    NOTE(review): the base class identifier ``UpperCAmelCase_`` is not defined
    in this file — presumably ``BaseOutput`` from the imports above; confirm
    against the original module.
    """

    # The wrapped output tensor.
    __UpperCAmelCase : torch.FloatTensor
class A ( nn.Module ):
    """Convolutional encoder: conv-in, a chain of down blocks, a mid block,
    and a GroupNorm/SiLU/conv-out head.

    NOTE(review): every method signature below repeats the parameter name
    ``__UpperCAmelCase`` (duplicate argument names are a SyntaxError), and the
    bodies read names (``layers_per_block``, ``block_out_channels``, ...) that
    those signatures never bind — apparently damage from an automated renaming
    pass; the code is kept byte-identical for review.
    """

    def __init__(self : Union[str, Any] , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=3 , __UpperCAmelCase : Optional[Any]=("DownEncoderBlock2D",) , __UpperCAmelCase : int=(6_4,) , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : Any=3_2 , __UpperCAmelCase : str="silu" , __UpperCAmelCase : Any=True , ) -> Dict:
        """Build conv-in, the stack of down blocks, the mid block, and the
        norm/act/conv-out head (output channels doubled when ``double_z``)."""
        super().__init__()
        UpperCAmelCase__ = layers_per_block
        UpperCAmelCase__ = torch.nn.Convad(
            __UpperCAmelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        UpperCAmelCase__ = None
        UpperCAmelCase__ = nn.ModuleList([] )
        # down
        UpperCAmelCase__ = block_out_channels[0]
        for i, down_block_type in enumerate(__UpperCAmelCase ):
            UpperCAmelCase__ = output_channel
            UpperCAmelCase__ = block_out_channels[i]
            # The last down block does not downsample further.
            UpperCAmelCase__ = i == len(__UpperCAmelCase ) - 1
            UpperCAmelCase__ = get_down_block(
                __UpperCAmelCase , num_layers=self.layers_per_block , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=__UpperCAmelCase , resnet_groups=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
            self.down_blocks.append(__UpperCAmelCase )
        # mid
        UpperCAmelCase__ = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
        # out
        UpperCAmelCase__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__UpperCAmelCase , eps=1E-6 )
        UpperCAmelCase__ = nn.SiLU()
        UpperCAmelCase__ = 2 * out_channels if double_z else out_channels
        UpperCAmelCase__ = nn.Convad(block_out_channels[-1] , __UpperCAmelCase , 3 , padding=1 )
        UpperCAmelCase__ = False

    def lowercase_ (self : List[Any] , __UpperCAmelCase : int ) -> str:
        """Forward pass: conv-in -> down blocks -> mid block -> norm/act/conv-out.

        Routes the blocks through ``torch.utils.checkpoint`` when training
        with gradient checkpointing enabled (the ``use_reentrant`` keyword is
        only passed on torch >= 1.11.0)."""
        UpperCAmelCase__ = x
        UpperCAmelCase__ = self.conv_in(__UpperCAmelCase )
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(__UpperCAmelCase : int ):
                def custom_forward(*__UpperCAmelCase : Optional[Any] ):
                    return module(*__UpperCAmelCase )

                return custom_forward

            # down
            if is_torch_version(">=" , "1.11.0" ):
                for down_block in self.down_blocks:
                    UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
                # middle
                UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
            else:
                for down_block in self.down_blocks:
                    UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase )
                # middle
                UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __UpperCAmelCase )
        else:
            # down
            for down_block in self.down_blocks:
                UpperCAmelCase__ = down_block(__UpperCAmelCase )
            # middle
            UpperCAmelCase__ = self.mid_block(__UpperCAmelCase )
        # post-process
        UpperCAmelCase__ = self.conv_norm_out(__UpperCAmelCase )
        UpperCAmelCase__ = self.conv_act(__UpperCAmelCase )
        UpperCAmelCase__ = self.conv_out(__UpperCAmelCase )
        return sample
class A ( nn.Module ):
    """Convolutional decoder: conv-in, a mid block, a chain of up blocks, and
    a norm/act/conv-out head (SpatialNorm head when ``norm_type == "spatial"``).

    NOTE(review): as in the encoder above, the method signatures repeat the
    parameter name ``__UpperCAmelCase`` (a SyntaxError) and the bodies read
    names the signatures never bind — renaming-pass damage, kept
    byte-identical for review.
    """

    def __init__(self : List[Any] , __UpperCAmelCase : str=3 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : Optional[int]=("UpDecoderBlock2D",) , __UpperCAmelCase : str=(6_4,) , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : Tuple=3_2 , __UpperCAmelCase : Any="silu" , __UpperCAmelCase : Any="group" , ) -> Dict:
        """Build conv-in, the mid block, the stack of up blocks (channel order
        reversed relative to the encoder), and the output head."""
        super().__init__()
        UpperCAmelCase__ = layers_per_block
        UpperCAmelCase__ = nn.Convad(
            __UpperCAmelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        UpperCAmelCase__ = None
        UpperCAmelCase__ = nn.ModuleList([] )
        # Conditioning channels are only used by the "spatial" norm variant.
        UpperCAmelCase__ = in_channels if norm_type == "spatial" else None
        # mid
        UpperCAmelCase__ = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
        # up
        UpperCAmelCase__ = list(reversed(__UpperCAmelCase ) )
        UpperCAmelCase__ = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(__UpperCAmelCase ):
            UpperCAmelCase__ = output_channel
            UpperCAmelCase__ = reversed_block_out_channels[i]
            # The last up block does not upsample further.
            UpperCAmelCase__ = i == len(__UpperCAmelCase ) - 1
            UpperCAmelCase__ = get_up_block(
                __UpperCAmelCase , num_layers=self.layers_per_block + 1 , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , prev_output_channel=__UpperCAmelCase , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , resnet_groups=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , temb_channels=__UpperCAmelCase , resnet_time_scale_shift=__UpperCAmelCase , )
            self.up_blocks.append(__UpperCAmelCase )
            UpperCAmelCase__ = output_channel
        # out
        if norm_type == "spatial":
            UpperCAmelCase__ = SpatialNorm(block_out_channels[0] , __UpperCAmelCase )
        else:
            UpperCAmelCase__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__UpperCAmelCase , eps=1E-6 )
        UpperCAmelCase__ = nn.SiLU()
        UpperCAmelCase__ = nn.Convad(block_out_channels[0] , __UpperCAmelCase , 3 , padding=1 )
        UpperCAmelCase__ = False

    def lowercase_ (self : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=None ) -> List[Any]:
        """Forward pass: conv-in -> mid block -> up blocks -> norm/act/conv-out.

        Casts the sample to the up-block parameter dtype after the mid block,
        optionally conditions the output norm on ``latent_embeds``, and uses
        ``torch.utils.checkpoint`` when training with gradient checkpointing
        (the ``use_reentrant`` keyword only on torch >= 1.11.0)."""
        UpperCAmelCase__ = z
        UpperCAmelCase__ = self.conv_in(__UpperCAmelCase )
        UpperCAmelCase__ = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(__UpperCAmelCase : str ):
                def custom_forward(*__UpperCAmelCase : List[str] ):
                    return module(*__UpperCAmelCase )

                return custom_forward

            if is_torch_version(">=" , "1.11.0" ):
                # middle
                UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , __UpperCAmelCase , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
                UpperCAmelCase__ = sample.to(__UpperCAmelCase )
                # up
                for up_block in self.up_blocks:
                    UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
            else:
                # middle
                UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , __UpperCAmelCase , __UpperCAmelCase )
                UpperCAmelCase__ = sample.to(__UpperCAmelCase )
                # up
                for up_block in self.up_blocks:
                    UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
        else:
            # middle
            UpperCAmelCase__ = self.mid_block(__UpperCAmelCase , __UpperCAmelCase )
            UpperCAmelCase__ = sample.to(__UpperCAmelCase )
            # up
            for up_block in self.up_blocks:
                UpperCAmelCase__ = up_block(__UpperCAmelCase , __UpperCAmelCase )
        # post-process
        if latent_embeds is None:
            UpperCAmelCase__ = self.conv_norm_out(__UpperCAmelCase )
        else:
            UpperCAmelCase__ = self.conv_norm_out(__UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase__ = self.conv_act(__UpperCAmelCase )
        UpperCAmelCase__ = self.conv_out(__UpperCAmelCase )
        return sample
class A(nn.Module):
    """Vector quantization layer: maps each spatial vector of `z` to its nearest
    codebook embedding and returns the straight-through quantized tensor plus the
    commitment loss.

    Fix: the original `__init__` declared one duplicated parameter name (a
    SyntaxError), assignment targets were erased, and every method collided on
    `lowercase_` even though the body calls `self.remap_to_used` and
    `self.unmap_to_all`. Names are restored from the body's own references.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random",
                 sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        self.remap = remap
        if self.remap is not None:
            # `remap` is a path to a .npy file holding the subset of used indices
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        """Map raw codebook indices to positions in the `used` subset."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1  # indices not present in `used`
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        """Inverse of `remap_to_used`: positions in `used` back to raw indices."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # (B, C, H, W) -> (B, H, W, C) and flatten to rows of codebook dimension
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)
        # nearest codebook entry per row: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None
        # compute loss for embedding; `legacy` swaps which term carries beta
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        """Return codebook vectors for `indices`, reshaped to `shape` if given."""
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices)
        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
class A(object):
    """Diagonal Gaussian over image latents, parameterized by a tensor whose
    channel dimension holds [mean, logvar] halves.

    Fix: the original `__init__` declared a duplicated parameter name (a
    SyntaxError) and all methods collided on one name. The base class was an
    undefined obfuscated identifier; a plain `object` base matches how the
    class is used here (no inherited behavior is referenced).
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        # split channels into mean / log-variance halves
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            # degenerate distribution: zero spread, sampling returns the mean
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype)

    def sample(self, generator=None):
        """Draw a reparameterized sample mean + std * eps."""
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype)
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        """KL divergence to `other` (another instance), or to N(0, I) if None."""
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar, dim=[1, 2, 3], )

    def nll(self, sample, dims=[1, 2, 3]):
        """Negative log-likelihood of `sample`, summed over `dims`."""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        """Most likely value — the mean."""
        return self.mean
| 65 | 0 |
def A_(input_num: int) -> int:
    """Return the sum of the proper divisors of ``input_num`` (divisors < n).

    Fixes: the original validated with ``isinstance(A__, A__)`` (always a
    TypeError) and its body read an unbound name ``input_num``; the parameter is
    renamed to match. The O(n) scan is replaced by an O(sqrt(n)) pairing of each
    divisor d with ``input_num // d``.

    Raises:
        ValueError: if the input is not an int or is not positive.
    """
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    if input_num == 1:
        return 0  # 1 has no proper divisors
    total = 1  # 1 divides every n > 1
    d = 2
    while d * d <= input_num:
        if input_num % d == 0:
            total += d
            partner = input_num // d
            if partner != d:  # avoid double-counting perfect-square roots
                total += partner
        d += 1
    return total


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 99 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Read boolean flag `key` from the environment; `default` when unset.

    Fixes: the original declared the parameter name twice (a SyntaxError) and
    relied on ``distutils.util.strtobool``, which was removed in Python 3.12;
    the accepted truth values below reproduce strtobool's exact semantics
    (returning 1/0). The name matches the module-level call sites.

    Raises:
        ValueError: if the variable is set to an unrecognized value.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        return default
    # KEY is set, convert it to 1 or 0 (strtobool-compatible).
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if value in ("n", "no", "f", "false", "off", "0"):
        return 0
    raise ValueError(f"""If set, {key} must be yes or no.""")
# Fix: every constant below was bound to one obfuscated name, so each
# assignment clobbered the previous one. The flag names are restored from the
# functions further down that read `_run_slow_tests` etc.; the pytest-mark
# names follow the datasets test-suite convention.
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.1: \'pip install "soundfile>=0.12.1"\'; ',
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
    reason='test requires apache-beam and a compatible dill version',
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('0.3.2'),
    reason='test requires dill>0.3.2 for cloudpickle compatibility',
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == 'win32',
    reason='test should not be run on Windows',
)
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless faiss is importable.

    Fix: the original assigned the skipped test to a throwaway name and then
    returned the unbound name `test_case`; the parameter is renamed to match.
    """
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless regex is importable.

    Fix: the original returned the unbound name `test_case`; the parameter is
    renamed to match.
    """
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless elasticsearch is importable.

    Fix: the original returned the unbound name `test_case`; the parameter is
    renamed to match.
    """
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless sqlalchemy is importable.

    Fix: the original returned the unbound name `test_case`; the parameter is
    renamed to match.
    """
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless PyTorch is available per datasets.config.

    Fix: the original returned the unbound name `test_case`; the parameter is
    renamed to match.
    """
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless TensorFlow is available per datasets.config.

    Fix: the original returned the unbound name `test_case`; the parameter is
    renamed to match.
    """
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless JAX is available per datasets.config.

    Fix: the original returned the unbound name `test_case`; the parameter is
    renamed to match.
    """
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless Pillow is available per datasets.config.

    Fix: the original returned the unbound name `test_case`; the parameter is
    renamed to match.
    """
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless transformers is importable.

    Fix: the success branch returned the unbound name `test_case`; the
    parameter is renamed to match.
    """
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless tiktoken is importable.

    Fix: the success branch returned the unbound name `test_case`; the
    parameter is renamed to match.
    """
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless spacy is importable.

    Fix: the success branch returned the unbound name `test_case`; the
    parameter is renamed to match.
    """
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case
def lowerCAmelCase_(model):
    """Decorator factory: skip a test unless spacy and the given `model` load.

    Fix: the inner decorator's parameter shadowed the model argument and the
    success branch returned the unbound name `test_case`; names are restored
    so `model` is the spacy model id and `test_case` the decorated test.
    """
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless pyspark is importable.

    Fix: the success branch returned the unbound name `test_case`; the
    parameter is renamed to match.
    """
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless joblibspark is importable.

    Fix: the success branch returned the unbound name `test_case`; the
    parameter is renamed to match.
    """
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless the RUN_SLOW flag is enabled.

    Fix: the original returned the unbound name `test_case`; the parameter is
    renamed to match.
    """
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless the RUN_LOCAL flag is enabled.

    Fix: the original returned the unbound name `test_case`; the parameter is
    renamed to match.
    """
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless the RUN_PACKAGED flag is enabled.

    Fix: the original returned the unbound name `test_case`; the parameter is
    renamed to match.
    """
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case
def lowerCAmelCase_(test_case):
    """Decorator: skip `test_case` unless the RUN_REMOTE flag is enabled.

    Fix: the original returned the unbound name `test_case`; the parameter is
    renamed to match.
    """
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def lowerCAmelCase_ ( *__A ) -> Optional[int]:
'''simple docstring'''
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(__A ) and name.startswith("test" ):
for decorator in decorators:
UpperCAmelCase__ = decorator(__A )
setattr(cls, __A, __A )
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
    """Raised by the offline simulator when a request has no timeout set.

    Fix: the class was renamed to the name the raise site below actually uses,
    and the undefined obfuscated base is replaced by `Exception` (the class is
    raised, so it must be an exception type).
    """
    pass
class OfflineSimulationMode(Enum):
    """Modes for the `offline` context manager below.

    Fix: renamed to match its usages (`OfflineSimulationMode.CONNECTION_FAILS`
    etc.) and based on `Enum` (the file imports it and the consumer compares
    members with `is`).
    """

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def lowerCAmelCase_(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment for the duration of the block.

    Fix: the original signature declared the same parameter name twice (a
    SyntaxError) and several assignment targets were erased; names are restored
    from the body's own references (`mode`, `timeout`, `url`, `kwargs`).
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""")
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        # NOTE(review): the patched-in value was erased by obfuscation; True is
        # the only value that makes this mode meaningful — confirm.
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def lowerCAmelCase_(*args, **kwargs):
    """chdir into a fresh temporary directory; restore the old cwd on exit.

    Fix: the original called ``os.chdir(__A)`` on the varargs tuple instead of
    the temporary directory, and restored an unbound name afterwards.
    Extra `args`/`kwargs` are forwarded to `tempfile.TemporaryDirectory`.
    """
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            # always return to the caller's directory, even on error
            os.chdir(original_working_dir)
@contextmanager
def lowerCAmelCase_():
    """Assert that pyarrow's allocated memory grows inside the block.

    Fix: the baseline was bound to a throwaway obfuscated name, leaving
    `previous_allocated_memory` unbound at the assert.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowerCAmelCase_():
    """Assert that pyarrow's allocated memory does not grow inside the block.

    Fix: the baseline was bound to a throwaway obfuscated name, leaving
    `previous_allocated_memory` unbound at the assert.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowerCAmelCase_(rng1, rng2):
    """Return True iff two numpy generators yield the same next 10 draws.

    Fix: the original declared the parameter name twice (a SyntaxError).
    Each generator is deep-copied first so neither caller's state advances.
    """
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def lowerCAmelCase_(func):
    """Decorator: xfail a test when the server answers HTTP 500/502; re-raise otherwise.

    Fix: the original wrapper signature declared `__A` both as a positional and
    as `*args`/`**kwargs` (a SyntaxError); names are restored from the body.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class A:
    """Result of a finished subprocess: exit code plus captured stdout/stderr lines.

    Fix: the original `__init__` declared the same parameter name three times
    (a SyntaxError); names are restored from the attribute assignments.
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def lowerCAmelCase_ ( __A, __A ) -> Optional[int]:
'''simple docstring'''
while True:
UpperCAmelCase__ = await stream.readline()
if line:
callback(__A )
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Run `cmd` and stream-capture its stdout/stderr line by line.

    Fix: the original declared `__A` for every parameter (a SyntaxError) and the
    `tee` helper / lambdas lost their names; restored from the body's
    references. The function is renamed to match its caller below.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def lowerCAmelCase_(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Run `cmd` to completion via `_stream_subprocess`; raise on failure or silence.

    Fix: the original declared `__A` for every parameter (a SyntaxError); the
    keyword names are restored from the forwarding call.

    Raises:
        RuntimeError: if the process exits non-zero, or produces no output at all
            (useful when the remote side does the actual asserting).
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""")
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"""'{cmd_str}' produced no output.""")
    return result
def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker (0 when not under xdist).

    Fix: local names were erased by obfuscation and the function is renamed to
    match its call site in the port helper below.
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    # workers are named gw0, gw1, ... — strip the prefix and parse the number
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)
def lowerCAmelCase_():
    """Return a master port unique per pytest-xdist worker (base 29500 + worker id).

    Fix: the original bound both locals to a throwaway name and then returned
    the unbound names `port` and `uniq_delta`.
    """
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 65 | 0 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Fix: both constants were bound to one obfuscated name, so the logger was
# clobbered by the dict even though `logger.info` is used in the config class
# below. The archive-map name follows the HF naming convention.
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class SCREAMING_SNAKE_CASE_(PretrainedConfig):
    """Configuration for the Conditional DETR model.

    Fix: the original `__init__` declared one duplicated parameter name for all
    34 arguments (a SyntaxError), and every attribute assignment / class
    attribute / method was collapsed onto a single obfuscated name. Parameter
    and attribute names are restored from the right-hand sides of the
    assignments; the base class is the `PretrainedConfig` imported at the top
    of this module.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # PretrainedConfig alias map: generic names -> model-specific attributes
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300,
                 encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6,
                 decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0,
                 is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1,
                 attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0,
                 auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50",
                 use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2,
                 mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2,
                 bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs, ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # rebuild the nested backbone config from its serialized dict
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class SCREAMING_SNAKE_CASE_(OnnxConfig):
    """ONNX export configuration for Conditional DETR.

    Fix: all three properties were named `snake_case_`, so the later
    definitions shadowed the earlier ones; property names are restored to the
    `OnnxConfig` hook names, and the undefined obfuscated base is replaced by
    the `OnnxConfig` imported at the top of this module.
    """

    # minimum torch version able to export this architecture
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        """Dynamic-axis specification for the exported graph inputs."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self):
        # absolute tolerance used when comparing exported vs. eager outputs
        return 1e-5

    @property
    def default_onnx_opset(self):
        return 12
| 100 | def lowerCAmelCase_ ( __A, __A ) -> float:
'''simple docstring'''
def get_matched_characters(__A, __A ) -> str:
UpperCAmelCase__ = []
UpperCAmelCase__ = min(len(_stra ), len(_stra ) ) // 2
for i, l in enumerate(_stra ):
UpperCAmelCase__ = int(max(0, i - limit ) )
UpperCAmelCase__ = int(min(i + limit + 1, len(_stra ) ) )
if l in _stra[left:right]:
matched.append(__A )
UpperCAmelCase__ = f"""{_stra[0:_stra.index(__A )]} {_stra[_stra.index(__A ) + 1:]}"""
return "".join(__A )
# matching characters
UpperCAmelCase__ = get_matched_characters(__A, __A )
UpperCAmelCase__ = get_matched_characters(__A, __A )
UpperCAmelCase__ = len(__A )
# transposition
UpperCAmelCase__ = (
len([(ca, ca) for ca, ca in zip(__A, __A ) if ca != ca] ) // 2
)
if not match_count:
UpperCAmelCase__ = 0.0
else:
UpperCAmelCase__ = (
1
/ 3
* (
match_count / len(__A )
+ match_count / len(__A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
UpperCAmelCase__ = 0
for ca, ca in zip(stra[:4], stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 65 | 0 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def UpperCamelCase(coefficient_matrix, constant_matrix, init_val, iterations):
    """Solve ``coefficient_matrix @ x = constant_matrix`` with Jacobi iteration.

    Fixes: the original signature declared one duplicated parameter name for
    all four arguments (a SyntaxError) and every local was rebound to a single
    obfuscated name, destroying the algorithm; names are restored from the
    expressions that consume them.

    Returns the solution after `iterations` sweeps, starting from `init_val`.
    Raises ValueError on shape mismatches, non-positive iteration counts, or a
    coefficient matrix that is not strictly diagonally dominant.
    """
    rowsa, colsa = coefficient_matrix.shape
    rowsb, colsb = constant_matrix.shape

    if rowsa != colsa:
        msg = f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
        raise ValueError(msg)
    if colsb != 1:
        msg = f"Constant matrix must be nx1 but received {rowsb}x{colsb}"
        raise ValueError(msg)
    if rowsa != rowsb:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rowsa}x{colsa} and {rowsb}x{colsb}"
        )
        raise ValueError(msg)
    if len(init_val) != rowsa:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rowsa}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    # augmented matrix [A | b]
    table = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]  # diagonal element
                elif col == cols - 1:
                    val = table[row][col]  # constant term b[row]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table):
    """Raise ValueError unless each diagonal entry of the augmented matrix
    strictly exceeds the sum of the other coefficients in its row.

    Fixes: the original reused the name `UpperCamelCase`, shadowing the Jacobi
    solver above even though the solver calls `strictly_diagonally_dominant`;
    locals erased by obfuscation are restored. The last (constant) column is
    excluded from the row sums.
    """
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 101 | def lowerCAmelCase_ ( __A, __A ) -> None:
'''simple docstring'''
UpperCAmelCase__ = len(__A )
print("The following activities are selected:" )
# The first activity is always selected
UpperCAmelCase__ = 0
print(__A, end="," )
# Consider rest of the activities
for j in range(__A ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(__A, end="," )
UpperCAmelCase__ = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = [1, 3, 0, 5, 8, 5]
UpperCamelCase__ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 65 | 0 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Fix: all four constants were bound to one obfuscated name, clobbering each
# other, while the tokenizer class below reads `logger`, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES —
# those references fix the names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Read a newline-delimited vocab file into an OrderedDict of token -> index.

    Fix: local names were erased by obfuscation (each line rebound one name);
    the function is renamed to match its call site in the tokenizer below.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    """Greedy longest-match-first wordpiece tokenizer over a fixed vocab.

    Fixes: the original `__init__` declared `a_` for every parameter (a
    SyntaxError) and the undefined obfuscated base is replaced by `object`.
    The class and its method are renamed to match their call sites in the
    CPM-Ant tokenizer below (`WordpieceTokenizer(...)` / `.tokenize(...)`).
    """

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split `token` into vocab pieces, emitting `unk_token` for gaps."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # longest substring starting at `start` that is in the vocab
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # no piece matched: emit <unk> and advance one character
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =False
def __init__(self , a_ , a_="<d>" , a_="</d>" , a_="<s>" , a_="</s>" , a_="<pad>" , a_="<unk>" , a_="</n>" , a_="</_>" , a_="left" , **a_ , ):
'''simple docstring'''
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=a_ , eod_token=a_ , bos_token=a_ , eos_token=a_ , pad_token=a_ , unk_token=a_ , line_token=a_ , space_token=a_ , padding_side=a_ , **a_ , )
__snake_case : Union[str, Any] = bod_token
__snake_case : List[Any] = eod_token
__snake_case : List[Any] = load_vocab(a_ )
__snake_case : Dict = self.encoder[space_token]
__snake_case : Any = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__snake_case : List[str] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda a_ : x[1] ) )
__snake_case : str = {v: k for k, v in self.encoder.items()}
__snake_case : List[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.encoder["\n"]
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return len(self.encoder )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Any = []
for x in jieba.cut(a_ , cut_all=a_ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(a_ ) )
return output_tokens
def SCREAMING_SNAKE_CASE (self , a_ , **a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = [i for i in token_ids if i >= 0]
__snake_case : Tuple = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return token in self.encoder
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return "".join(a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self.encoder.get(a_ , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self.decoder.get(a_ , self.unk_token )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if os.path.isdir(a_ ):
__snake_case : Optional[Any] = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
__snake_case : Optional[Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
__snake_case : List[str] = 0
if " " in self.encoder:
__snake_case : Optional[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
__snake_case : int = self.encoder['''\n''']
del self.encoder["\n"]
__snake_case : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda a_ : x[1] ) )
with open(a_ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
__snake_case : Union[str, Any] = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def SCREAMING_SNAKE_CASE (self , token_ids_0 , token_ids_1 = None ):
    """Prepend the BOS id to one (or each of two) id sequences.

    FIX: both parameters previously shared one name, which is a SyntaxError;
    distinct names restored from the identifiers the body referenced.
    """
    if token_ids_1 is None:
        return [self.bos_token_id] + token_ids_0
    return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
def SCREAMING_SNAKE_CASE (self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
    """Return a mask with 1 at special-token positions and 0 elsewhere.

    FIX: all three parameters previously shared one name, which is a
    SyntaxError; distinct names restored.
    """
    if already_has_special_tokens:
        # Sequences already carry their special tokens: delegate to the base class.
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
    if token_ids_1 is not None:
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
    return [1] + ([0] * len(token_ids_0 ))
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
UpperCamelCase__ = 'base_with_context'
def lowerCAmelCase_ ( __A, __A ) -> int:
    """Copy note-encoder weights from a T5X checkpoint dict into a torch model.

    NOTE(review): this function is broken as written and is kept byte-for-byte
    for reference only. Both parameters share the name ``__A`` (a SyntaxError),
    every converted tensor is bound to the throwaway local ``UpperCAmelCase__``
    instead of a target model attribute (e.g. ``model.token_embedder.weight``),
    and the body reads the pre-obfuscation names ``weights``/``model``/
    ``ly_weight``/``attention_weights`` which are undefined here. The original
    attribute targets were lost and must be restored before use.
    """
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
    for lyr_num, lyr in enumerate(model.encoders ):
        UpperCAmelCase__ = weights[f"""layers_{lyr_num}"""]
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = ly_weight["attention"]
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
    return model
def lowerCAmelCase_ ( __A, __A ) -> Tuple:
    """Copy continuous-encoder weights from a T5X checkpoint into a torch model.

    NOTE(review): broken exactly like the note-encoder loader above -- duplicate
    ``__A`` parameters, discarded assignments, and undefined pre-obfuscation
    names; kept byte-for-byte for reference.
    """
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
    for lyr_num, lyr in enumerate(model.encoders ):
        UpperCAmelCase__ = weights[f"""layers_{lyr_num}"""]
        UpperCAmelCase__ = ly_weight["attention"]
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
    return model
def lowerCAmelCase_ ( __A, __A ) -> List[Any]:
    """Copy FiLM-decoder weights from a T5X checkpoint into a torch model.

    NOTE(review): broken exactly like the two loaders above -- duplicate
    ``__A`` parameters, discarded assignments, undefined pre-obfuscation
    names; kept byte-for-byte for reference.
    """
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        UpperCAmelCase__ = weights[f"""layers_{lyr_num}"""]
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
        UpperCAmelCase__ = ly_weight["self_attention"]
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        UpperCAmelCase__ = ly_weight["MultiHeadDotProductAttention_0"]
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
    return model
def lowerCAmelCase_ ( args ) -> int:
    """Convert a T5X music-spectrogram-diffusion checkpoint into a diffusers
    SpectrogramDiffusionPipeline and optionally save it.

    FIX: every intermediate result was bound to a throwaway local and then
    read back under its original identifier (NameError); consistent names are
    restored from those reads. The parameter is named ``args`` because the
    body reads ``args.checkpoint_path`` / ``args.save`` / ``args.output_path``.
    """
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    # Convert jax arrays to plain numpy so torch can consume them.
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint )
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin" )
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config )
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large" )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )
    # NOTE(review): the three loaders above were renamed `lowerCAmelCase_` by
    # obfuscation; these calls keep the original helper names and need those
    # definitions restored.
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder )
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder )
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path )

# Restore the entry-point name referenced by the __main__ guard below.
main = lowerCAmelCase_
if __name__ == "__main__":
    # FIX: the parser and parsed args were bound to throwaway names while
    # `parser` / `args` / `main` / `MODEL` were read (NameError); restored.
    MODEL = 'base_with_context'  # default checkpoint variant (module constant was obfuscated away)
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
    # NOTE(review): argparse `type=bool` treats any non-empty string as True;
    # kept as-is to preserve the existing CLI contract.
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument(
        '--checkpoint_path',
        default=f'''{MODEL}/checkpoint_500000''',
        type=str,
        required=False,
        help='Path to the original jax model checkpoint.',
    )
    args = parser.parse_args()
    lowerCAmelCase_(args)  # the converter defined above (`main` was lost to obfuscation)
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
A__ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str ):
    """Infer the pipeline data format from a file extension ("pipe" when no path).

    FIX: this helper had lost its name (it was bound to `UpperCamelCase` and
    then immediately shadowed by the factory below) and its parameter was
    read as the undefined `path`; the name and consistent locals are
    restored. Module-level `UpperCamelCase` still ends up bound to the
    factory, exactly as before.
    """
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        f"""Unable to determine file format from file extension {path}. """
        f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def UpperCamelCase( args ):
    """Factory for the `run` sub-command: build the pipeline and its data reader.

    FIX: results were bound to throwaway locals and read back as `nlp` /
    `format` (NameError); consistent names restored.
    """
    nlp = pipeline(
        task=args.task ,model=args.model if args.model else None ,config=args.config ,tokenizer=args.tokenizer ,device=args.device ,)
    format = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
    reader = PipelineDataFormat.from_str(
        format=format ,output_path=args.output ,input_path=args.input ,column=args.column if args.column else nlp.default_input_names ,overwrite=args.overwrite ,)
    return RunCommand(reader and nlp ,reader ) if False else RunCommand(nlp ,reader )
class __snake_case ( UpperCamelCase_ ):
    """CLI command that runs a pipeline over entries from an input file/stream.

    FIX: the constructor assigned undefined names instead of the attributes
    read later, several `add_argument` calls passed the parser itself as
    `type=`, `set_defaults` registered the parser (not the factory) as the
    command function, and the run loop read names that were never bound.
    NOTE(review): both methods below still share the obfuscated name
    `UpperCAmelCase__`, so the second shadows the first on the class -- the
    original method names were lost and should be restored.
    """

    def __init__( self , nlp , reader ):
        # Keep the pipeline and the data reader for `run`.
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def UpperCAmelCase__ ( parser ):
        """Register the `run` sub-command and its options on *parser*."""
        run_parser = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''')
        run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''')
        run_parser.add_argument('''--input''' , type=str , help='''Path to the file to use for inference''')
        run_parser.add_argument('''--output''' , type=str , help='''Path to the file that will be used post to write results.''')
        run_parser.add_argument('''--model''' , type=str , help='''Name or path to the model to instantiate.''')
        run_parser.add_argument('''--config''' , type=str , help='''Name or path to the model\'s config to instantiate.''')
        run_parser.add_argument(
            '''--tokenizer''' , type=str , help='''Name of the tokenizer to use. (default: same as the model name)''')
        run_parser.add_argument(
            '''--column''' , type=str , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
        run_parser.add_argument(
            '''--format''' , type=str , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
        run_parser.add_argument(
            '''--device''' , type=int , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
        run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''')
        # Dispatch to the module-level factory (was previously the parser itself).
        run_parser.set_defaults(func=UpperCamelCase)

    def UpperCAmelCase__ ( self ):
        """Execute the pipeline over every entry supplied by the reader."""
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output , dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(F"""Current pipeline requires output to be in binary format, saving at {binary_path}""")
        else:
            self._reader.save(outputs)
import math
def lowerCAmelCase_ ( __A ) -> bool:
    """Return True iff *__A* is a perfect square.

    FIX: the previous float check (sqrt(n)*sqrt(n) == n) returns wrong
    answers for large integers because of float rounding; use exact integer
    math.isqrt instead. Still raises ValueError for negative input.
    """
    return math.isqrt(__A ) ** 2 == __A
def lowerCAmelCase_ ( __A ) -> bool:
    """Binary-search check for whether *__A* is a perfect square.

    Returns False for negative input (the search range is empty).

    FIX: the search bounds and midpoint were bound to a throwaway local while
    `left`/`right`/`mid`/`n` were read (NameError); consistent names restored.
    """
    lo, hi = 0, __A
    while lo <= hi:
        mid = (lo + hi) // 2
        square = mid * mid
        if square == __A:
            return True
        if square > __A:
            hi = mid - 1
        else:
            lo = mid + 1
    return False
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (lowerCamelCase__ ):
    """Deprecated alias of the image-processor base class; emits a FutureWarning."""

    def __init__( self , *args , **kwargs ):
        # FIX: the `*` and `**` parameters previously shared one name (a
        # SyntaxError), and the positional-args tuple was passed to
        # warnings.warn as the warning *category*; emit a proper
        # FutureWarning instead, then delegate construction.
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''' ,
            FutureWarning ,
        )
        super().__init__(*args ,**kwargs )
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class A ( UpperCAmelCase_ ):
    """Output container for UnCLIP scheduler steps.

    NOTE(review): both fields below carry the same (obfuscated) identifier, so
    the dataclass effectively keeps only the second annotation; the original
    field names -- `prev_sample` (the x_{t-1} tensor) and
    `pred_original_sample` (the predicted denoised x_0) -- were lost and must
    be restored before this class is usable.
    """

    # Originally `prev_sample`: computed sample x_{t-1} of the previous timestep.
    __UpperCAmelCase : torch.FloatTensor
    # Originally `pred_original_sample`: predicted denoised sample x_0.
    __UpperCAmelCase : Optional[torch.FloatTensor] = None
def lowerCAmelCase_ ( __A, __A=0.999, __A="cosine", ) -> Tuple:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(__A ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__A ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCAmelCase__ = []
for i in range(__A ):
UpperCAmelCase__ = i / num_diffusion_timesteps
UpperCAmelCase__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__A ) / alpha_bar_fn(__A ), __A ) )
return torch.tensor(__A, dtype=torch.floataa )
class A ( UpperCAmelCase_ , UpperCAmelCase_ ):
    """UnCLIP scheduler: DDPM-style sampling restricted to the
    squaredcos_cap_v2 beta schedule.

    FIX: the previous version did not compile (every ``__init__`` parameter
    shared one name) and bound every intermediate value to a throwaway local
    while reading it back under its original identifier. Parameter and local
    names are restored from those reads; the ``self.*`` writes are restored
    from the attribute reads across the class.
    NOTE(review): the module-level beta helper and the output dataclass were
    renamed (`lowerCAmelCase_` / `A`) by the same obfuscation; the calls to
    `betas_for_alpha_bar` and `UnCLIPSchedulerOutput` below keep the original
    names and need those definitions restored. All methods below also still
    share the obfuscated name `lowercase_`.
    """

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_0_0_0,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ) -> None:
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )

        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )

        self.variance_type = variance_type

    def lowercase_ (self , sample: torch.FloatTensor , timestep: Optional[int] = None ) -> torch.FloatTensor:
        """Identity input scaling, kept for scheduler-API compatibility."""
        return sample

    def lowercase_ (self , num_inference_steps: int , device: Union[str, torch.device] = None ) -> Any:
        """Set the descending inference timestep schedule.

        Unlike most schedulers this spacing hits the last train timestep
        exactly (step ratio uses num_train_timesteps - 1).
        """
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )

    def lowercase_ (self , t , prev_timestep=None , predicted_variance=None , variance_type=None ):
        """Return the (possibly transformed) posterior variance at timestep *t*."""
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance , min=1E-20 ) )
            variance = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def lowercase_ (
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        """Advance the reverse diffusion one step: predict x_{t-1} from x_t."""
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
                " for the UnCLIPScheduler." )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample , -self.config.clip_sample_range , self.config.clip_sample_range )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=generator , device=model_output.device )

            variance = self._get_variance(
                t , predicted_variance=predicted_variance , prev_timestep=prev_timestep , )

            if self.variance_type == "fixed_small_log":
                variance = variance  # _get_variance already returned a std-dev in this mode
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
                    " for the UnCLIPScheduler." )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )

    def lowercase_ (
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        """Forward-diffuse clean samples to the given timesteps, q(x_t | x_0)."""
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
        timesteps = timesteps.to(original_samples.device )

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        # Broadcast to the sample rank by appending singleton dims.
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->int:
'''simple docstring'''
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(_lowercase , _lowercase ):
raise TypeError("Input value must be a 'int' type" )
return bin(_lowercase ).count("1" )
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class A ( unittest.TestCase ):
    """Multi-GPU smoke tests that launch the accelerate test scripts via torchrun.

    FIX: the first method bound the computed paths to throwaway locals instead
    of the instance attributes the other methods read, and each launch command
    was built under one name but referenced under another (NameError).
    NOTE(review): all four methods still share the obfuscated name
    `lowercase_` (the first was presumably `setUp`, the rest `test_*`); the
    original names were lost and should be restored for unittest discovery.
    """

    def lowercase_ (self : Union[str, Any] ) -> str:
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )

    @require_multi_gpu
    def lowercase_ (self : List[str] ) -> Any:
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )

    @require_multi_gpu
    def lowercase_ (self : str ) -> str:
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(f"""Command: {cmd}""" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )

    @require_multi_gpu
    def lowercase_ (self : Tuple ) -> int:
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )

    @require_multi_gpu
    def lowercase_ (self : Dict ) -> str:
        print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        # Restrict the launch to the first two GPUs.
        with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    # Distributed correctness check for Accelerator.pad_across_processes
    # (intended to be launched under torchrun, one process per device).
    # FIX: every value was bound to a throwaway name and then read back under
    # its original identifier (NameError); consistent names restored.
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 1_0)
    tensor = torch.randint(0, 1_0, shape).to(accelerator.device)
    error_msg = ""

    # Pad at the end (default) and verify shape, content, and zero padding.
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Pad at the front and verify again.
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__UpperCamelCase : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__UpperCamelCase : Dict = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir ,'''models/bert/''' ) )
lowerCAmelCase__ : int = self.transformer_dir
shutil.copy(
os.path.join(lowercase_ ,'''src/transformers/models/bert/modeling_bert.py''' ) ,os.path.join(self.transformer_dir ,'''models/bert/modeling_bert.py''' ) ,)
def __lowerCAmelCase ( self : str ):
lowerCAmelCase__ : Tuple = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Tuple ,lowercase_ : List[str] ,lowercase_ : List[str] ,lowercase_ : Any=None ):
lowerCAmelCase__ : Any = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
lowerCAmelCase__ : str = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
lowerCAmelCase__ : str = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=1_1_9 )
lowerCAmelCase__ : Union[str, Any] = black.format_str(lowercase_ ,mode=lowercase_ )
lowerCAmelCase__ : Any = os.path.join(self.transformer_dir ,'''new_code.py''' )
with open(lowercase_ ,'''w''' ,newline='''\n''' ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=lowercase_ )
with open(lowercase_ ,'''r''' ) as f:
self.assertTrue(f.read() ,lowercase_ )
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : List[Any] = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(lowercase_ ,lowercase_ )
    def __lowerCAmelCase ( self : Union[str, Any] ):
        """Exercise copy-consistency checking in several variants.

        NOTE(review): `self.check_copy_consistency` does not exist on this
        (obfuscated) class -- the helper above was renamed to
        `__lowerCAmelCase` -- and the final call passes the undefined name
        `lowercase_` as re.sub arguments; the original method and argument
        names must be restored before this test can run.
        """
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' ,'''BertLMPredictionHead''' ,REFERENCE_CODE + '''\n''' ,)
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' ,'''BertLMPredictionHead''' ,lowercase_ ,)
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' ,'''TestModelLMPredictionHead''' ,re.sub('''Bert''' ,'''TestModel''' ,lowercase_ ) ,)
        # Copy consistency with a really long name
        long_class_name = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' ,F'{long_class_name}LMPredictionHead' ,re.sub('''Bert''' ,lowercase_ ,lowercase_ ) ,)
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' ,'''TestModelLMPredictionHead''' ,lowercase_ ,overwrite_result=re.sub('''Bert''' ,'''TestModel''' ,lowercase_ ) ,)
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : List[str] = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
lowerCAmelCase__ : str = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
lowerCAmelCase__ : Union[str, Any] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase__ : int = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = check_copies.convert_to_localized_md(
lowercase_ ,lowercase_ ,localized_readme['''format_model_list'''] )
self.assertFalse(lowercase_ )
self.assertEqual(lowercase_ ,lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ : int = check_copies.convert_to_localized_md(
lowercase_ ,lowercase_ ,localized_readme['''format_model_list'''] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(lowercase_ )
lowerCAmelCase__ : List[str] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
lowerCAmelCase__ : Optional[int] = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase__ : str = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase__ ,lowerCAmelCase__ : Any = check_copies.convert_to_localized_md(
lowercase_ ,lowercase_ ,localized_readme['''format_model_list'''] )
# Check if the model link is synchronized.
self.assertEqual(lowercase_ ,lowercase_ )
| 106 | import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys from *state_dict* in place.

    These keys carry no weights transferable to the HF model (version counters,
    dtype markers, and the output projection that is rebuilt separately).
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        # Default prevents a KeyError when a key is absent from this checkpoint.
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` that shares its weight with embedding *emb*.

    Used to tie the LM head to the shared token embedding.
    """
    vocab_size, emb_size = emb.weight.shape
    # bias=False: a tied LM head has no bias term.
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """Convert a fairseq mBART checkpoint on disk into a HF `MBartForConditionalGeneration`.

    checkpoint_path: path to the fairseq ``model.pt`` file.
    hf_config_path: HF config to base the converted model on.
    finetuned: whether the checkpoint is fine-tuned (ties the LM head to the shared embedding).
    mbart_50: whether the checkpoint is an mBART-50 model (uses relu activations when fine-tuned).
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    # fairseq stores no explicit shared embedding; reuse the decoder's.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    # CLI entry point: convert the given fairseq checkpoint and save it in HF format.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 65 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import map: submodule name -> public names it exports. Consumed by
# `_LazyModule` below so heavy framework code loads only on first access.
_import_structure = {
    'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
    'configuration_data2vec_text': [
        'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Data2VecTextConfig',
        'Data2VecTextOnnxConfig',
    ],
    'configuration_data2vec_vision': [
        'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Data2VecVisionConfig',
        'Data2VecVisionOnnxConfig',
    ],
}

# PyTorch models are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_data2vec_audio'] = [
        'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Data2VecAudioForAudioFrameClassification',
        'Data2VecAudioForCTC',
        'Data2VecAudioForSequenceClassification',
        'Data2VecAudioForXVector',
        'Data2VecAudioModel',
        'Data2VecAudioPreTrainedModel',
    ]
    _import_structure['modeling_data2vec_text'] = [
        'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Data2VecTextForCausalLM',
        'Data2VecTextForMaskedLM',
        'Data2VecTextForMultipleChoice',
        'Data2VecTextForQuestionAnswering',
        'Data2VecTextForSequenceClassification',
        'Data2VecTextForTokenClassification',
        'Data2VecTextModel',
        'Data2VecTextPreTrainedModel',
    ]
    _import_structure['modeling_data2vec_vision'] = [
        'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Data2VecVisionForImageClassification',
        'Data2VecVisionForMaskedImageModeling',
        'Data2VecVisionForSemanticSegmentation',
        'Data2VecVisionModel',
        'Data2VecVisionPreTrainedModel',
    ]

# TensorFlow models are only exported when TF is installed.
if is_tf_available():
    _import_structure['modeling_tf_data2vec_vision'] = [
        'TFData2VecVisionForImageClassification',
        'TFData2VecVisionForSemanticSegmentation',
        'TFData2VecVisionModel',
        'TFData2VecVisionPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# Modules whose versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py).
# Order note: tqdm must be checked before tokenizers.
pkgs_to_check_at_runtime = [
    'python',
    'tqdm',
    'regex',
    'requests',
    'packaging',
    'filelock',
    'numpy',
    'tokenizers',
    'huggingface-hub',
    'safetensors',
    'accelerate',
    'pyyaml',
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowerCAmelCase_(pkg, hint=None):
    """Check that *pkg* satisfies the version pinned in ``deps``.

    pkg: dependency name as it appears in `dependency_versions_table`.
    hint: optional extra text appended to the error message on mismatch.
    """
    require_version(deps[pkg], hint)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for LLaMA models; stores the architecture hyper-parameters."""

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict: {"type": "linear"|"dynamic", "factor": float > 1}."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"""got {self.rope_scaling}""")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""")
| 108 | import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
# Configure root logging once for the whole script.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Module-level logger used throughout main().
logger = logging.getLogger(__name__)
def main():
    """Pre-tokenize a raw text dump once and pickle the resulting id sequences.

    Avoids re-doing (tokenization + token_to_ids) on every training run of the
    distillation pipeline. Each line of the input file becomes one example,
    wrapped in the tokenizer's BOS/CLS and EOS/SEP tokens.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"""Loading Tokenizer ({args.tokenizer_name})""")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"""Loading text from {args.file_path}""")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"""{len(data)} examples to process.""")

    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"""{bos} {text.strip()} {sep}"""
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"""{len(data)} examples processed.""")

    dp_file = f"""{args.dump_file}.{args.tokenizer_name}.pickle"""
    vocab_size = tokenizer.vocab_size
    # Use the smallest integer dtype that can represent every token id.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"""Dump to {dp_file}""")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
    # Script entry point.
    # NOTE(review): `main` is not defined above — the function is named
    # `lowerCAmelCase_`; confirm which name is intended.
    main()
| 65 | 0 |
"""simple docstring"""
from ... import PretrainedConfig
A: Any = {
"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for NEZHA models; stores the architecture hyper-parameters."""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        # NEZHA uses functional relative position encodings bounded by this value.
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 109 | from manim import *
class A ( UpperCAmelCase_ ):
    # NOTE(review): the base class `UpperCAmelCase_` is not defined anywhere visible;
    # a manim scene would normally subclass `Scene` (from `from manim import *`) — confirm.
    def lowercase_ (self : Union[str, Any] ) -> List[str]:
        """Animate layer-by-layer inference of a model too large for GPU memory:
        as the input reaches each layer, that layer's weights are moved CPU->GPU
        and back, with CPU/GPU/Model/Disk boxes drawn on screen.

        NOTE(review): throughout this body, locals are assigned to `UpperCAmelCase__`
        but later referenced under other names (`mem`, `cpu`, `gpu`, `model`,
        `model_arr`, `model_cpu_arr`, `input`, `a_c`, ...). As written this raises
        NameError at runtime — looks like mechanical renaming damage; confirm
        against the upstream animation script before relying on it.
        """
        # Building blocks: memory cell, fill cell, and meta (disk) cell rectangles.
        UpperCAmelCase__ = Rectangle(height=0.5 , width=0.5 )
        UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        UpperCAmelCase__ = Rectangle(height=0.25 , width=0.25 )
        # CPU box: two columns of six cells plus a label.
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("CPU" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__UpperCAmelCase )
        # GPU box: four cells plus a label.
        UpperCAmelCase__ = [mem.copy() for i in range(4 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("GPU" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(__UpperCAmelCase )
        # Model box: six cells representing the model's layers.
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("Model" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(__UpperCAmelCase )
        # Per-layer fill rectangles: one on the model, one mirrored on the CPU column.
        UpperCAmelCase__ = []
        UpperCAmelCase__ = []
        for i, rect in enumerate(__UpperCAmelCase ):
            UpperCAmelCase__ = fill.copy().set_fill(__UpperCAmelCase , opacity=0.8 )
            target.move_to(__UpperCAmelCase )
            model_arr.append(__UpperCAmelCase )
            UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase )
        # Disk box: two columns of six meta cells plus a label.
        UpperCAmelCase__ = [meta_mem.copy() for i in range(6 )]
        UpperCAmelCase__ = [meta_mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("Disk" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        disk.move_to([-4, -1.25, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        # Legend square and captions.
        UpperCAmelCase__ = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        UpperCAmelCase__ = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase__ = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__UpperCAmelCase )
        UpperCAmelCase__ = MarkupText(
            f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase ) )
        # The input token square entering the model.
        UpperCAmelCase__ = Square(0.3 )
        input.set_fill(__UpperCAmelCase , opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] , __UpperCAmelCase , buff=0.5 )
        self.play(Write(__UpperCAmelCase ) )
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=__UpperCAmelCase , buff=0.02 )
        self.play(MoveToTarget(__UpperCAmelCase ) )
        self.play(FadeOut(__UpperCAmelCase ) )
        UpperCAmelCase__ = Arrow(start=__UpperCAmelCase , end=__UpperCAmelCase , color=__UpperCAmelCase , buff=0.5 )
        a.next_to(model_arr[0].get_left() , __UpperCAmelCase , buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        UpperCAmelCase__ = MarkupText(
            f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) )
        UpperCAmelCase__ = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(__UpperCAmelCase ) , Circumscribe(model_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        UpperCAmelCase__ = a.copy()
        # Walk the input across all six layers, shuttling weights CPU<->GPU.
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 , __UpperCAmelCase , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            UpperCAmelCase__ = AnimationGroup(
                FadeOut(__UpperCAmelCase , run_time=0.5 ) , MoveToTarget(__UpperCAmelCase , run_time=0.5 ) , FadeIn(__UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
            self.play(__UpperCAmelCase )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    UpperCAmelCase__ = 0.7
                    self.play(
                        Circumscribe(model_arr[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
                    if i < 1:
                        self.play(
                            MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
                    else:
                        self.play(
                            MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
            else:
                # Last layer: park the weights back on the CPU and move the input out.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
                self.play(MoveToTarget(model_cpu_arr[i] ) )
            UpperCAmelCase__ = a_c
            UpperCAmelCase__ = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
        self.play(
            FadeOut(__UpperCAmelCase ) , FadeOut(__UpperCAmelCase , run_time=0.5 ) , )
        UpperCAmelCase__ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) , MoveToTarget(__UpperCAmelCase ) )
        self.wait()
| 65 | 0 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( UpperCamelCase__ , unittest.TestCase ):
    """Tokenizer tests for DeBERTa (slow and fast implementations).

    NOTE(review): the mixin base `UpperCamelCase__` is undefined here — the file
    imports `TokenizerTesterMixin`, which is presumably what was meant; confirm.
    NOTE(review): the three class attributes below are all named `_lowercase`, so
    only the last assignment survives; they look like they should be the mixin's
    `tokenizer_class` / `test_rust_tokenizer` / `rust_tokenizer_class` fields.
    Similarly, many locals in the methods are assigned to `lowercase__` but later
    referenced under other names (`tokenizer`, `tokens`, `sequence`, ...), which
    raises NameError at runtime — mechanical renaming damage; verify upstream.
    """
    _lowercase : str = DebertaTokenizer
    _lowercase : Any = True
    _lowercase : Union[str, Any] = DebertaTokenizerFast

    def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
        """Write a tiny BPE vocab/merges pair into the test tmpdir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowercase__ = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''[UNK]''',
        ]
        lowercase__ = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
        lowercase__ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        lowercase__ = {'''unk_token''': '''[UNK]'''}
        lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(UpperCamelCase_ ) )

    def lowerCamelCase_ ( self: Union[str, Any] , **UpperCamelCase_: Union[str, Any] ) -> Dict:
        """Instantiate the slow tokenizer from the tmpdir fixtures."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )

    def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: int ) -> Tuple:
        """Return an (input, expected output) text pair for round-trip tests."""
        lowercase__ = '''lower newer'''
        lowercase__ = '''lower newer'''
        return input_text, output_text

    def lowerCamelCase_ ( self: Optional[int] ) -> Any:
        """BPE tokenization splits 'lower newer' into the expected subwords and ids."""
        lowercase__ = self.get_tokenizer()
        lowercase__ = '''lower newer'''
        lowercase__ = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        lowercase__ = tokenizer.tokenize(UpperCamelCase_ )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
        lowercase__ = tokens + [tokenizer.unk_token]
        lowercase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )

    def lowerCamelCase_ ( self: List[Any] ) -> Tuple:
        """Sentence-pair encoding yields 0/1 token_type_ids for the two segments."""
        lowercase__ = self.get_tokenizer()
        lowercase__ = tokenizer('''Hello''' , '''World''' )
        lowercase__ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , UpperCamelCase_ )

    @slow
    def lowerCamelCase_ ( self: int ) -> List[str]:
        """Special-token handling matches between encode() and build_inputs_with_special_tokens()."""
        lowercase__ = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
        lowercase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase_ )
        lowercase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase_ )
        lowercase__ = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
        lowercase__ = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
        lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
        lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def lowerCamelCase_ ( self: str ) -> Dict:
        """Batch-encode three sentences and compare ids/type-ids/mask and decode output."""
        lowercase__ = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            lowercase__ = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
            lowercase__ = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            lowercase__ = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ )
            lowercase__ = [tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) for seq in encoding['''input_ids''']]
            # fmt: off
            lowercase__ = {
                '''input_ids''': [
                    [1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
                ],
                '''token_type_ids''': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                '''attention_mask''': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on
            lowercase__ = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            self.assertDictEqual(encoding.data , UpperCamelCase_ )
            for expected, decoded in zip(UpperCamelCase_ , UpperCamelCase_ ):
                self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 110 | from __future__ import annotations
from scipy.special import comb # type: ignore
class A:
    """A Bezier curve defined by a list of 2-D control points.

    Fixed: the three methods were all mangled to the same name ``lowercase_``
    while the bodies call ``self.basis_function`` / ``self.bezier_curve_function``,
    and the Bernstein basis passed ``t`` instead of the index ``i`` to ``comb``.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        """Store control points; degree is one less than the number of points.

        Degree determines the flexibility of the curve.
        Degree = 1 will produce a straight line.
        """
        self.list_of_points = list_of_points
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Return the Bernstein basis values [b_0(t), ..., b_degree(t)] at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points)):
            # i-th Bernstein basis polynomial: C(n, i) * (1-t)^(n-i) * t^i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # The basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Return the point (x, y) on the curve at time t in [0, 1]."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        """Plot the curve (blue) and its control points (red) with matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


# The demo below instantiates ``BezierCurve``; keep that name working.
BezierCurve = A
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fixed: the demo called the undefined name ``BezierCurve``; the class in
    # this file is (mangled to) ``A``. Each call opens a matplotlib window.
    A([(1, 2), (3, 5)]).plot_curve()  # degree 1
    A([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    A([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 65 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Fixed: five distinct module constants were all mangled to `_lowerCamelCase`,
# so the tokenizer class below (which references VOCAB_FILES_NAMES, logger,
# CamembertTokenizer, ...) hit NameError. Restore the conventional names.
if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    # Slow tokenizer is unavailable without the optional `sentencepiece` dep.
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

# File names expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

# Remote locations of the pretrained vocabulary/tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

# Maximum input length (in tokens) of each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

# SentencePiece meta-symbol marking the start of a word.
SPIECE_UNDERLINE = "▁"
class __UpperCAmelCase(UpperCAmelCase_):
    """Fast CamemBERT tokenizer backed by HuggingFace *tokenizers*.

    Fixed: the obfuscation gave every ``__init__`` parameter the same name
    (a SyntaxError), collapsed all three methods onto one name, and left the
    bodies referencing the original, then-undefined identifiers. Parameter
    names and the framework hook names (``build_inputs_with_special_tokens``,
    ``create_token_type_ids_from_sequences``, ``save_vocabulary``) are
    restored so the base class can dispatch to them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word: it absorbs the preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Saving the slow vocab requires the original sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """Add <s> ... </s> (and </s></s> between a pair) around the token ids."""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        """Return an all-zero token-type mask (CamemBERT does not use type ids)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into `save_directory`; return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 336 | import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), 'Tatoeba directory does not exist.')
class A(unittest.TestCase):
    """Smoke tests for the Tatoeba->Marian converter (requires a local checkout).

    Fixed: the skip decorator referenced the undefined name ``UpperCAmelCase_``
    instead of ``DEFAULT_REPO``; the cached property was named ``lowercase_``
    but accessed as ``self.resolver``; ``save_dir``/``dry_run`` were passed
    undefined mangled names; and the test methods lacked the ``test_`` prefix
    unittest needs for discovery.
    """

    @cached_property
    def resolver(self):
        """Converter writing into a throwaway temp dir; cached so tests share it."""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        # End-to-end conversion of one language pair.
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        # dry_run=True returns the card content and metadata without writing files.
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 65 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _lowerCamelCase( a ):
__a = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(__A , __A )
def _lowerCamelCase( a ):
__a , __a = emb.weight.shape
__a = nn.Linear(__A , __A , bias=__A )
__a = emb.weight.data
return lin_layer
def _lowerCamelCase(a, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False):
    """Convert a fairseq mBART checkpoint into an MBartForConditionalGeneration.

    Fixed: all four parameters were mangled to the same name ``a`` (a
    SyntaxError) and the body referenced undefined names. Keyword names match
    how the CLI below calls this function.

    :param a: path to the fairseq ``model.pt`` checkpoint
    :param hf_config_path: HF config/architecture to instantiate
    :param finetuned: checkpoint is fine-tuned -> tie lm_head to embeddings
    :param mbart_aa: checkpoint is an mBART-50 model
    :returns: the populated MBart model
    """
    state_dict = torch.load(a, map_location="cpu")["model"]
    # NOTE(review): the helpers are defined above under the mangled name
    # `_lowerCamelCase`; these are the names the original body referenced.
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_aa and finetuned:
        # mBART-50 fine-tuned checkpoints use ReLU activations.
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    # Fixed: parser/args/model were all bound to one mangled name and then
    # read through the original names (NameError), and the code read
    # ``args.mbart_aa`` although the flag is spelled ``--mbart_50``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    # NOTE(review): the converter is defined above under the mangled name
    # `_lowerCamelCase`; the intended public name is kept here as the original did.
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Fixed: every intermediate result was assigned to the same mangled name,
    # so the plotting section referenced undefined variables (NameError).
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 65 | 0 |
"""Verify `utils/documentation_tests.txt`: every listed path exists and the list is alphabetical."""

import os

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '.'

if __name__ == "__main__":
    # Fixed: all variables were mangled to the same name `_snake_case`, so
    # every later reference (REPO_PATH, doctest_file_path, the two lists,
    # line, path) was undefined.
    doctest_file_path = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = '\n'.join(non_existent_paths)
        raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
    if all_paths != sorted(all_paths):
        raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 294 | from __future__ import annotations
from collections import deque
class A:
    """Aho–Corasick automaton for finding all occurrences of a set of keywords.

    Fixed: ``self.adlist`` was never assigned (the mangler bound it to a
    local), and all four methods shared the name ``lowercase_`` while the
    bodies call ``add_keyword`` / ``find_next_state`` / ``set_fail_transitions``;
    the referenced method names are restored.
    """

    def __init__(self, keywords: list[str]):
        """Build the trie from `keywords` and wire the failure transitions."""
        # Node 0 is the root; each node stores its character, children,
        # failure link and the keywords that end at (or fail into) it.
        self.adlist = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        """Return the child of `current_state` labelled `char`, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        """Insert `keyword` into the trie, creating nodes as needed."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        """BFS over the trie to fill failure links and merge outputs."""
        q = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            # Depth-1 nodes fail back to the root.
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                # Walk failure links until a state with a matching child (or root).
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                # A node also outputs everything its failure target outputs.
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return {keyword: [start indices]} for every keyword found in `string`."""
        result = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    # Record the start index of this occurrence.
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module's docstrings.
    doctest.testmod()
| 65 | 0 |
"""simple docstring"""
def UpperCAmelCase__(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve a 2x2 linear system via Cramer's rule.

    Each equation is [a, b, c] meaning a*x + b*y = c.

    Fixed: both parameters were mangled to the same name (a SyntaxError) and
    all coefficients collapsed onto `aa`/`ba`, which made every determinant
    identically zero.

    :returns: the solution (x, y); (0.0, 0.0) for the trivial solution
    :raises ValueError: on malformed input, an inconsistent system, or
        infinitely many solutions
    """
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("""Please enter a valid equation.""")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("""Both a & b of two equations can't be zero.""")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("""Infinite solutions. (Consistent system)""")
        else:
            raise ValueError("""No solution. (Inconsistent system)""")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 64 | import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# Fixed: the class below calls `logger.warning(...)`, but the module logger
# was bound only to a mangled name. Bind both names to the same logger.
UpperCamelCase__ = logger = logging.get_logger(__name__)
class A ( UpperCAmelCase_ ):
    """SpeechT5-style feature extractor: raw waveforms in, padded float
    ``input_values`` out; log-mel spectrogram targets via ``audio_target``.

    NOTE(review): identifiers in this block were machine-mangled. Every
    signature repeats the parameter name ``__UpperCAmelCase`` (duplicate
    argument names are a SyntaxError) and the bodies reference the original,
    now-undefined names (``do_normalize``, ``sampling_rate``, ``speech``, ...),
    and the three helpers below all share the name ``lowercase_``. The
    comments describe the evident intent; confirm against the pristine
    upstream file before relying on them.
    """

    # Names of the tensors this extractor produces.
    __UpperCAmelCase : int = ['input_values', 'attention_mask']

    def __init__(self : Any , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 1_6_0_0_0 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : bool = False , __UpperCAmelCase : int = 8_0 , __UpperCAmelCase : int = 1_6 , __UpperCAmelCase : int = 6_4 , __UpperCAmelCase : str = "hann_window" , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : float = 8_0 , __UpperCAmelCase : float = 7_6_0_0 , __UpperCAmelCase : float = 1E-10 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : bool = True , **__UpperCAmelCase : Any , ) -> str:
        """Configure waveform / spectrogram extraction.

        Evident parameter order (from the attribute assignments below):
        feature_size, sampling_rate, padding_value, do_normalize,
        num_mel_bins, hop_length (ms), win_length (ms), win_function,
        frame_signal_scale (deprecated), fmin, fmax, mel_floor,
        reduction_factor (deprecated), return_attention_mask.
        """
        super().__init__(feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , **__UpperCAmelCase )
        UpperCAmelCase__ = do_normalize
        UpperCAmelCase__ = return_attention_mask
        UpperCAmelCase__ = num_mel_bins
        UpperCAmelCase__ = hop_length
        UpperCAmelCase__ = win_length
        UpperCAmelCase__ = win_function
        UpperCAmelCase__ = frame_signal_scale
        UpperCAmelCase__ = fmin
        UpperCAmelCase__ = fmax
        UpperCAmelCase__ = mel_floor
        UpperCAmelCase__ = reduction_factor
        # Convert window/hop lengths from milliseconds to samples.
        UpperCAmelCase__ = win_length * sampling_rate // 1_0_0_0
        UpperCAmelCase__ = hop_length * sampling_rate // 1_0_0_0
        UpperCAmelCase__ = optimal_fft_length(self.sample_size )
        UpperCAmelCase__ = (self.n_fft // 2) + 1
        UpperCAmelCase__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCAmelCase )
        UpperCAmelCase__ = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
        # Both scale knobs are deprecated; warn when a non-default is passed.
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , __UpperCAmelCase , )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , __UpperCAmelCase , )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def lowercase_ (__UpperCAmelCase : List[np.ndarray] , __UpperCAmelCase : List[np.ndarray] , __UpperCAmelCase : float = 0.0 ) -> List[np.ndarray]:
        """Normalize each vector to zero mean / unit variance over its valid
        (unpadded) length; padded positions are set to the padding value."""
        if attention_mask is not None:
            # NOTE(review): np.intaa / np.floataa look like mangled np.int32 / np.float32.
            UpperCAmelCase__ = np.array(__UpperCAmelCase , np.intaa )
            UpperCAmelCase__ = []
            for vector, length in zip(__UpperCAmelCase , attention_mask.sum(-1 ) ):
                # Statistics are computed over the first `length` samples only.
                UpperCAmelCase__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    UpperCAmelCase__ = padding_value
                normed_input_values.append(__UpperCAmelCase )
        else:
            # No mask: normalize each full vector.
            UpperCAmelCase__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values

    def lowercase_ (self : Optional[int] , __UpperCAmelCase : np.ndarray , ) -> np.ndarray:
        """Compute a log10 mel spectrogram of one waveform; shape (frames, mel bins)."""
        UpperCAmelCase__ = spectrogram(
            __UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
        return log_mel_spec.T

    def __call__(self : Any , __UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , __UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , __UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : Optional[int] = None , **__UpperCAmelCase : str , ) -> BatchFeature:
        """Featurize `audio` (waveform inputs) and/or `audio_target` (spectrogram
        labels); when both are given, the target features are attached to the
        input batch as decoder labels/mask."""
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values." )
        if sampling_rate is not None:
            # Guard against featurizing audio sampled at a different rate.
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        if audio is not None:
            UpperCAmelCase__ = self._process_audio(
                __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , )
        else:
            UpperCAmelCase__ = None
        if audio_target is not None:
            # Targets go through the same pipeline with is_target=True (mel features).
            UpperCAmelCase__ = self._process_audio(
                __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , )
            if inputs is None:
                return inputs_target
            else:
                # Merge target features into the input batch as decoder-side labels.
                UpperCAmelCase__ = inputs_target["input_values"]
                UpperCAmelCase__ = inputs_target.get("attention_mask" )
                if decoder_attention_mask is not None:
                    UpperCAmelCase__ = decoder_attention_mask
        return inputs

    def lowercase_ (self : Optional[int] , __UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCAmelCase : bool = False , __UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , **__UpperCAmelCase : Any , ) -> BatchFeature:
        """Shared featurization path: batch the waveforms, optionally convert to
        mel features (targets), pad, normalize, and convert to tensors."""
        UpperCAmelCase__ = isinstance(__UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        UpperCAmelCase__ = is_batched_numpy or (
            isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            UpperCAmelCase__ = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for speech in speech]
        elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ):
            UpperCAmelCase__ = np.asarray(__UpperCAmelCase , dtype=np.floataa )
        elif isinstance(__UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
            UpperCAmelCase__ = speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            UpperCAmelCase__ = [speech]
        # needed to make pad() work on spectrogram inputs
        UpperCAmelCase__ = self.feature_size
        # convert into correct format for padding
        if is_target:
            UpperCAmelCase__ = [self._extract_mel_features(__UpperCAmelCase ) for waveform in speech]
            UpperCAmelCase__ = BatchFeature({"input_values": features} )
            UpperCAmelCase__ = self.num_mel_bins
        else:
            UpperCAmelCase__ = BatchFeature({"input_values": speech} )
        UpperCAmelCase__ = self.pad(
            __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
        # Restore the feature size temporarily overridden for padding.
        UpperCAmelCase__ = feature_size_hack
        # convert input values to correct format
        UpperCAmelCase__ = padded_inputs["input_values"]
        if not isinstance(input_values[0] , np.ndarray ):
            UpperCAmelCase__ = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for array in input_values]
        elif (
            not isinstance(__UpperCAmelCase , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.floataa )
        ):
            UpperCAmelCase__ = [array.astype(np.floataa ) for array in input_values]
        elif isinstance(__UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
            UpperCAmelCase__ = input_values.astype(np.floataa )
        # convert attention_mask to correct format
        UpperCAmelCase__ = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            UpperCAmelCase__ = [np.asarray(__UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            UpperCAmelCase__ = (
                attention_mask
                if self._get_padding_strategies(__UpperCAmelCase , max_length=__UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            UpperCAmelCase__ = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"] , attention_mask=__UpperCAmelCase , padding_value=self.padding_value )
        if return_tensors is not None:
            UpperCAmelCase__ = padded_inputs.convert_to_tensors(__UpperCAmelCase )
        return padded_inputs

    def lowercase_ (self : Tuple ) -> Dict[str, Any]:
        """Serialize the config, dropping attributes derived in __init__."""
        UpperCAmelCase__ = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        UpperCAmelCase__ = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
| 65 | 0 |
"""simple docstring"""
_A : str = 6_55_21
def __magic_name__ ( __snake_case : str ) -> int:
lowercase : Optional[Any] = 1
lowercase : str = 0
for plain_chr in plain_text:
lowercase : Optional[int] = (a + ord(__A )) % MOD_ADLER
lowercase : Tuple = (b + a) % MOD_ADLER
return (b << 16) | a
| 202 | from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class A(UpperCAmelCase_):
    """Output of the RobertaSeries text encoder: the projected embedding plus
    the standard transformer outputs.

    Fixed: the four fields were all mangled to the same name, so only one
    class attribute survived; the field names are restored to match the
    keywords used when this output is constructed in forward().
    """

    # Hidden states projected to `project_dim` (consumed downstream).
    projection_state: Optional[torch.FloatTensor] = None
    # Final encoder layer output.
    last_hidden_state: torch.FloatTensor = None
    # Per-layer hidden states (when output_hidden_states=True).
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Attention maps (when output_attentions=True).
    attentions: Optional[Tuple[torch.FloatTensor]] = None


# The model's forward() instantiates `TransformationModelOutput(...)`;
# expose this class under that name so the reference resolves.
TransformationModelOutput = A
class A(UpperCAmelCase_):
    """Configuration for RobertaSeries models (XLM-RoBERTa + projection head).

    Fixed: every parameter was mangled to the same name (a SyntaxError) and
    the body assigned mangled locals instead of instance attributes; the
    parameter names are restored to match the attributes read by the model.
    """

    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Output dimension of the projection head.
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


# The model class below references `RobertaSeriesConfig` as its config_class;
# expose this class under that name so the reference resolves.
RobertaSeriesConfig = A
class A ( UpperCAmelCase_ ):
    """XLM-RoBERTa encoder with a linear "transformation" head projecting
    hidden states to ``project_dim`` (Alt-Diffusion-style text encoder).

    NOTE(review): identifiers are machine-mangled. The four class attributes
    below are all assigned to the same name (only the last survives); from
    their values they appear to be _keys_to_ignore_on_load_unexpected,
    _keys_to_ignore_on_load_missing, base_model_prefix and config_class —
    confirm against upstream. Method bodies reference original,
    now-undefined names (``config``, ``return_dict``, ``outputs``).
    """

    __UpperCAmelCase : Tuple = [r'pooler', r'logit_scale']
    __UpperCAmelCase : int = [r'position_ids', r'predictions.decoder.bias']
    __UpperCAmelCase : Any = 'roberta'
    __UpperCAmelCase : List[str] = RobertaSeriesConfig

    def __init__(self : Tuple , __UpperCAmelCase : Optional[int] ) -> int:
        """Build the base encoder plus projection head(s) from the config."""
        super().__init__(__UpperCAmelCase )
        # Base encoder (presumably bound as self.base_model / self.roberta).
        UpperCAmelCase__ = XLMRobertaModel(__UpperCAmelCase )
        # Projection from hidden_size to project_dim (presumably self.transformation).
        UpperCAmelCase__ = nn.Linear(config.hidden_size , config.project_dim )
        UpperCAmelCase__ = getattr(__UpperCAmelCase , "has_pre_transformation" , __UpperCAmelCase )
        if self.has_pre_transformation:
            # Second head applied to the penultimate layer, with its own LayerNorm.
            UpperCAmelCase__ = nn.Linear(config.hidden_size , config.project_dim )
            UpperCAmelCase__ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()

    def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , ) -> Optional[int]:
        """Forward pass: run the encoder, then project either the penultimate
        layer (pre-transformation head) or the last hidden state."""
        UpperCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
        # Hidden states are forced on when the pre-transformation head needs layer -2.
        UpperCAmelCase__ = self.base_model(
            input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , position_ids=__UpperCAmelCase , head_mask=__UpperCAmelCase , inputs_embeds=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_attentions=__UpperCAmelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__UpperCAmelCase , )
        if self.has_pre_transformation:
            # Use the second-to-last layer, LayerNorm it, then project.
            UpperCAmelCase__ = outputs["hidden_states"][-2]
            UpperCAmelCase__ = self.pre_LN(__UpperCAmelCase )
            UpperCAmelCase__ = self.transformation_pre(__UpperCAmelCase )
            return TransformationModelOutput(
                projection_state=__UpperCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            # Project the final hidden state directly.
            UpperCAmelCase__ = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=__UpperCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 65 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Make torch/cuda ops deterministic so the pipeline tests below are reproducible.
enable_full_determinism()
class UpperCamelCase_ (UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Fast, CPU-sized unit tests for ``StableDiffusionXLImgaImgPipeline``.

    NOTE(review): local-variable names in this file were mangled — every
    assignment targets ``UpperCAmelCase_`` and many call sites pass the
    undefined name ``__UpperCAmelCase`` — so later references such as
    ``components``/``inputs``/``image`` are unresolved as written. Confirm
    against the un-mangled original before running.
    """

    # Pipeline-tester mixin hooks (all mangled to the same attribute name;
    # originally pipeline_class / params / batch_params / image params).
    __magic_name__ = StableDiffusionXLImgaImgPipeline
    __magic_name__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    __magic_name__ = PipelineTesterMixin.required_optional_params - {'latents'}
    __magic_name__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __magic_name__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    __magic_name__ = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
        """Build tiny randomly-initialized pipeline components (seeded for determinism)."""
        torch.manual_seed(0 )
        # Miniature UNet with the SDXL-style "text_time" additional embedding.
        UpperCAmelCase_ : List[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        UpperCAmelCase_ : Any = EulerDiscreteScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
        torch.manual_seed(0 )
        UpperCAmelCase_ : Tuple = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        # Tiny CLIP text towers; SDXL uses two text encoders/tokenizers.
        UpperCAmelCase_ : List[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=32 , )
        UpperCAmelCase_ : Tuple = CLIPTextModel(__UpperCAmelCase )
        UpperCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=__UpperCAmelCase )
        UpperCAmelCase_ : List[Any] = CLIPTextModelWithProjection(__UpperCAmelCase )
        UpperCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=__UpperCAmelCase )
        UpperCAmelCase_ : Any = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_a,
            "tokenizer_2": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int]=0 ) -> List[Any]:
        """Build deterministic call kwargs (image, generator, prompt) for the pipeline."""
        UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
        # Map from [-1, 1] into [0, 1] as the img2img pipeline expects.
        UpperCAmelCase_ : Tuple = image / 2 + 0.5
        if str(__UpperCAmelCase ).startswith("mps" ):
            # MPS does not support device-specific torch.Generator objects.
            UpperCAmelCase_ : Optional[int] = torch.manual_seed(__UpperCAmelCase )
        else:
            UpperCAmelCase_ : Tuple = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        UpperCAmelCase_ : List[str] = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.7_5,
        }
        return inputs

    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
        """Smoke-test a 2-step run and compare a 3x3 corner slice to golden values."""
        UpperCAmelCase_ : List[Any] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ : Dict = self.get_dummy_components()
        UpperCAmelCase_ : Any = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
        UpperCAmelCase_ : Optional[Any] = sd_pipe.to(__UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase )
        UpperCAmelCase_ : Optional[int] = sd_pipe(**__UpperCAmelCase ).images
        UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase_ : str = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
        """Delegate to the mixin's attention-slicing test with a loose tolerance."""
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )

    def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
        """Delegate to the mixin's batched-vs-single-inference equivalence test."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )

    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
        """Intentionally skipped mixin test (no-op override)."""
        pass

    def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
        """Check that passing precomputed prompt embeddings matches passing raw prompts."""
        UpperCAmelCase_ : Dict = self.get_dummy_components()
        UpperCAmelCase_ : Dict = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
        UpperCAmelCase_ : Any = sd_pipe.to(__UpperCAmelCase )
        UpperCAmelCase_ : Optional[Any] = sd_pipe.to(__UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        # forward without prompt embeds
        UpperCAmelCase_ : Optional[int] = self.get_dummy_inputs(__UpperCAmelCase )
        UpperCAmelCase_ : Dict = 3 * ["this is a negative prompt"]
        UpperCAmelCase_ : List[Any] = negative_prompt
        UpperCAmelCase_ : List[str] = 3 * [inputs["prompt"]]
        UpperCAmelCase_ : List[Any] = sd_pipe(**__UpperCAmelCase )
        UpperCAmelCase_ : List[Any] = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        UpperCAmelCase_ : Any = self.get_dummy_inputs(__UpperCAmelCase )
        UpperCAmelCase_ : Optional[int] = 3 * ["this is a negative prompt"]
        UpperCAmelCase_ : List[str] = 3 * [inputs.pop("prompt" )]
        # NOTE(review): an annotated assignment to a tuple target is invalid
        # syntax; originally this unpacked the four encode_prompt outputs.
        (
            (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) ,
        ) : Optional[Any] = sd_pipe.encode_prompt(__UpperCAmelCase , negative_prompt=__UpperCAmelCase )
        UpperCAmelCase_ : Optional[Any] = sd_pipe(
            **__UpperCAmelCase , prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , pooled_prompt_embeds=__UpperCAmelCase , negative_pooled_prompt_embeds=__UpperCAmelCase , )
        UpperCAmelCase_ : str = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class UpperCamelCase_ (unittest.TestCase ):
    """Slow, GPU-only integration test against a real pretrained checkpoint.

    NOTE(review): locals are mangled here too (all assignments target
    ``UpperCAmelCase_``; calls pass undefined ``__UpperCAmelCase``) — the
    references to ``latents``/``generator``/``pipe`` etc. are unresolved as
    written.
    """

    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
        """Release Python and CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any]="cpu" , lowerCAmelCase_ : Any=torch.floataa , lowerCAmelCase_ : List[Any]=0 ) -> Tuple:
        """Build deterministic call kwargs with fixed NumPy-seeded latents."""
        UpperCAmelCase_ : str = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        # Latents come from NumPy so the fixture is identical across torch versions.
        UpperCAmelCase_ : str = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 64, 64) )
        UpperCAmelCase_ : Any = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
        UpperCAmelCase_ : Optional[int] = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
        """Run stable-diffusion-2-base for 3 steps and compare a corner slice to golden values."""
        UpperCAmelCase_ : int = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        UpperCAmelCase_ : int = self.get_inputs(__UpperCAmelCase )
        UpperCAmelCase_ : Optional[Any] = pipe(**__UpperCAmelCase ).images
        UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ : Any = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
        assert np.abs(image_slice - expected_slice ).max() < 7e-3
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 6_50, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
        },
        {
            'framework': 'tensorflow',
            'script': 'run_tf.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 6_00, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
        },
    ] )
class A ( unittest.TestCase ):
    """Single-node SageMaker training smoke tests, parameterized per framework.

    Requires the ``sm_env`` pytest fixture (provides ``self.env``) and only
    runs when the TEST_SAGEMAKER environment variable is 'True'.
    """

    def lowercase_ (self : int ) -> Optional[Any]:
        """Copy the example training script into the test path and sanity-check the env fixture."""
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=__UpperCAmelCase , )
        assert hasattr(self , "env" )

    def lowercase_ (self : List[Any] , __UpperCAmelCase : Optional[int]=1 ) -> Dict:
        """Build a HuggingFace SageMaker estimator for this framework/instance configuration."""
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=__UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCAmelCase , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )

    def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Tuple ) -> Optional[int]:
        """Export the training-job metrics of ``job_name`` to a CSV in the test path."""
        TrainingJobAnalytics(__UpperCAmelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )

    def lowercase_ (self : Any ) -> Optional[Any]:
        """Run one training job and assert runtime/accuracy/loss against expected thresholds."""
        UpperCAmelCase__ = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        UpperCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        UpperCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        UpperCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        UpperCAmelCase__ = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 9_9_9_9_9_9 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __UpperCAmelCase )
| 65 | 0 |
"""simple docstring"""
import math
def a__():
    """Interactive driver for the columnar transposition cipher.

    Prompts for a message, a key, and a mode ('e' to encrypt, 'd' to
    decrypt), then prints the result with a trailing '|' so trailing spaces
    are visible.

    Fixes: the obfuscated original collapsed every local into one name
    (``UpperCamelCase``) and referenced the undefined ``__A``/``mode``/``text``.
    NOTE(review): ``encrypt_message``/``decrypt_message`` are referenced as in
    the original file but its defs were renamed to ``a__`` — confirm the
    intended names before running.
    """
    message = input("Enter message: ")
    key = int(input(F"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")
    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    # NOTE: if the mode is neither 'e' nor 'd', `text` is unbound (matches
    # the original behavior).
    print(F"Output:\n{text + '|'}")
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = [""] * key
for col in range(__A ):
UpperCamelCase = col
while pointer < len(__A ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(__A )
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = math.ceil(len(__A ) / key )
UpperCamelCase = key
UpperCamelCase = (num_cols * num_rows) - len(__A )
UpperCamelCase = [""] * num_cols
UpperCamelCase = 0
UpperCamelCase = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCamelCase = 0
row += 1
return "".join(__A )
if __name__ == "__main__":
    # Run embedded doctests, then the interactive cipher driver.
    import doctest

    doctest.testmod()
    # NOTE(review): `main` is undefined in this mangled file (the driver was
    # renamed to `a__`) — confirm the intended entry-point name.
    main()
import math
import random
def lowerCAmelCase_(value, deriv=False) -> float:
    """Logistic sigmoid, or its derivative when *deriv* is True.

    When ``deriv`` is True, *value* is assumed to already be a sigmoid
    output, so the derivative is simply ``value * (1 - value)``.

    Fixes: the obfuscated original declared both parameters as ``__A``
    (a SyntaxError — duplicate argument name) while the body referenced the
    intended names ``value`` and ``deriv``.

    :param value: input (or a sigmoid output, when deriv=True)
    :param deriv: if True, return the derivative instead of the sigmoid
    :return: sigmoid(value) or its derivative
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value
# Learning-rate-like scale used by the forward-propagation loop below
# (presumably the original constant was named INITIAL_VALUE — TODO confirm).
UpperCamelCase__ = 0.0_2
def lowerCAmelCase_(expected, number_propagations) -> float:
    """Train a single scalar weight toward ``expected`` (a percentage in [0, 100]).

    Starts from a random odd weight in [1, 199], then repeatedly runs a
    one-neuron forward pass and a gradient-style update.

    Fixes: the obfuscated original declared both parameters as ``__A``
    (a SyntaxError) and collapsed every local into ``UpperCAmelCase__``,
    leaving ``weight``/``layer_1``/... unresolved.

    NOTE(review): ``sigmoid_function`` and ``INITIAL_VALUE`` are referenced
    exactly as in the original body, but this mangled file defines them
    under the names ``lowerCAmelCase_`` and ``UpperCamelCase__`` — confirm
    the intended names. Also note ``layer_1`` is unbound if
    ``number_propagations`` is 0 (matches the original behavior).

    :param expected: target value (percentage)
    :param number_propagations: number of training iterations
    :return: the final prediction, scaled back to a percentage
    """
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta (sigmoid derivative of the activation)
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
    # Run embedded doctests, then an interactive training session.
    import doctest

    doctest.testmod()
    # NOTE(review): both inputs are assigned to the same mangled name and
    # `forward_propagation` is undefined in this file (the trainer was
    # renamed to `lowerCAmelCase_`) — confirm the intended names.
    UpperCamelCase__ = int(input('Expected value: '))
    UpperCamelCase__ = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
| 65 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
# Module-level logger and the map of known pretrained Perceiver configs.
a_ :Tuple = logging.get_logger(__name__)
a_ :Union[str, Any] = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class snake_case__ ( UpperCAmelCase_ ):
    """Perceiver model configuration: stores every architecture hyperparameter.

    NOTE(review): the obfuscated signature repeats the parameter name
    ``_snake_case`` (a SyntaxError) and every assignment targets
    ``snake_case__``, so the body's references (``num_latents`` etc.) name
    the *intended* parameters — confirm against the un-mangled original.
    """

    _SCREAMING_SNAKE_CASE = 'perceiver'  # model_type identifier

    def __init__( self : List[Any], _snake_case : Dict=2_5_6, _snake_case : Union[str, Any]=1_2_8_0, _snake_case : Dict=7_6_8, _snake_case : Dict=1, _snake_case : List[str]=2_6, _snake_case : int=8, _snake_case : List[Any]=8, _snake_case : Union[str, Any]=None, _snake_case : Optional[int]=None, _snake_case : Optional[Any]="kv", _snake_case : Dict=1, _snake_case : List[str]=1, _snake_case : Union[str, Any]="gelu", _snake_case : int=0.1, _snake_case : Dict=0.0_2, _snake_case : Any=1e-12, _snake_case : Optional[Any]=True, _snake_case : List[Any]=2_6_2, _snake_case : str=2_0_4_8, _snake_case : Any=5_6, _snake_case : int=[3_6_8, 4_9_6], _snake_case : Union[str, Any]=1_6, _snake_case : Any=1_9_2_0, _snake_case : Dict=1_6, _snake_case : Dict=[1, 1_6, 2_2_4, 2_2_4], **_snake_case : Optional[int], ) ->List[str]:
        """Store all Perceiver hyperparameters; extra kwargs go to the base config."""
        super().__init__(**__UpperCAmelCase )
        # Core latent/transformer geometry.
        snake_case__ : str = num_latents
        snake_case__ : int = d_latents
        snake_case__ : int = d_model
        snake_case__ : Any = num_blocks
        snake_case__ : Any = num_self_attends_per_block
        snake_case__ : Optional[Any] = num_self_attention_heads
        snake_case__ : Any = num_cross_attention_heads
        snake_case__ : List[str] = qk_channels
        snake_case__ : Optional[int] = v_channels
        snake_case__ : List[str] = cross_attention_shape_for_attention
        snake_case__ : Optional[Any] = self_attention_widening_factor
        snake_case__ : Optional[Any] = cross_attention_widening_factor
        snake_case__ : Tuple = hidden_act
        snake_case__ : Optional[Any] = attention_probs_dropout_prob
        snake_case__ : Dict = initializer_range
        snake_case__ : List[str] = layer_norm_eps
        snake_case__ : Union[str, Any] = use_query_residual
        # masked language modeling attributes
        snake_case__ : str = vocab_size
        snake_case__ : List[str] = max_position_embeddings
        # image classification attributes
        snake_case__ : Optional[int] = image_size
        # flow attributes
        snake_case__ : Tuple = train_size
        # multimodal autoencoding attributes
        snake_case__ : Optional[int] = num_frames
        snake_case__ : List[Any] = audio_samples_per_frame
        snake_case__ : int = samples_per_patch
        snake_case__ : Optional[int] = output_shape
class snake_case__ ( UpperCAmelCase_ ):
    """ONNX export configuration for Perceiver (input axes, tolerance, dummy inputs)."""

    @property
    def lowercase_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axis mapping for the exported model's inputs."""
        if self.task == "multiple-choice":
            snake_case__ : Dict = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            snake_case__ : Tuple = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )

    @property
    def lowercase_ ( self : Union[str, Any] ) ->float:
        """Numeric tolerance used when validating the ONNX export."""
        return 1e-4

    def lowercase_ ( self : Any, _snake_case : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], _snake_case : int = -1, _snake_case : int = -1, _snake_case : int = -1, _snake_case : bool = False, _snake_case : Optional[TensorType] = None, _snake_case : int = 3, _snake_case : int = 4_0, _snake_case : int = 4_0, ) ->Mapping[str, Any]:
        """Generate dummy text or image inputs for ONNX tracing.

        NOTE(review): the obfuscated signature repeats ``_snake_case`` (a
        SyntaxError) and calls pass the undefined ``__UpperCAmelCase`` — the
        branch on the preprocessor type (tokenizer vs. feature extractor) is
        the intended behavior.
        """
        if isinstance(__UpperCAmelCase, __UpperCAmelCase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            snake_case__ : Any = compute_effective_axis_dimension(
                __UpperCAmelCase, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            snake_case__ : List[Any] = preprocessor.num_special_tokens_to_add(__UpperCAmelCase )
            snake_case__ : Optional[int] = compute_effective_axis_dimension(
                __UpperCAmelCase, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=__UpperCAmelCase )
            # Generate dummy inputs according to compute batch and sequence
            snake_case__ : List[str] = [' '.join(['a'] ) * seq_length] * batch_size
            snake_case__ : List[Any] = dict(preprocessor(__UpperCAmelCase, return_tensors=__UpperCAmelCase ) )
            # Perceiver expects the key 'inputs' rather than 'input_ids'.
            snake_case__ : Optional[int] = inputs.pop('input_ids' )
            return inputs
        elif isinstance(__UpperCAmelCase, __UpperCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            snake_case__ : Optional[int] = compute_effective_axis_dimension(__UpperCAmelCase, fixed_dimension=OnnxConfig.default_fixed_batch )
            snake_case__ : List[str] = self._generate_dummy_images(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
            snake_case__ : int = dict(preprocessor(images=__UpperCAmelCase, return_tensors=__UpperCAmelCase ) )
            # Perceiver expects the key 'inputs' rather than 'pixel_values'.
            snake_case__ : Optional[int] = inputs.pop('pixel_values' )
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
from __future__ import annotations
class A :
    """A 2-D matrix of ints/floats with basic linear-algebra operations.

    Supports determinant, minors/cofactors, adjugate, inverse, row/column
    insertion, and the +, -, *, ** and comparison operators.

    NOTE(review): local names in this file were mangled — every assignment
    targets ``UpperCAmelCase__`` — so references such as ``rows``, ``cols``
    and ``error`` inside method bodies name the *intended* locals and are
    unresolved as written. The code below is documented according to that
    intent; confirm against the un-mangled original.
    """

    def __init__(self : Union[str, Any] , __UpperCAmelCase : list[list[int]] ) -> List[str]:
        """Validate *rows* (rectangular, numeric) and store them; [] means an empty matrix."""
        UpperCAmelCase__ = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float." )
        if len(__UpperCAmelCase ) != 0:
            UpperCAmelCase__ = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(__UpperCAmelCase ) != cols:
                    raise error
                for value in row:
                    if not isinstance(__UpperCAmelCase , (int, float) ):
                        raise error
            UpperCAmelCase__ = rows
        else:
            UpperCAmelCase__ = []

    def lowercase_ (self : Any ) -> list[list[int]]:
        """Return the columns of the matrix (its transpose as a list of lists)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]

    @property
    def lowercase_ (self : Any ) -> int:
        """Number of rows."""
        return len(self.rows )

    @property
    def lowercase_ (self : Union[str, Any] ) -> int:
        """Number of columns."""
        return len(self.rows[0] )

    @property
    def lowercase_ (self : List[Any] ) -> tuple[int, int]:
        """(num_rows, num_columns) shape tuple."""
        return (self.num_rows, self.num_columns)

    @property
    def lowercase_ (self : Tuple ) -> bool:
        """True when the matrix is square."""
        return self.order[0] == self.order[1]

    def lowercase_ (self : Any ) -> Matrix:
        """Return the identity matrix of the same order."""
        UpperCAmelCase__ = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(__UpperCAmelCase )

    def lowercase_ (self : int ) -> int:
        """Determinant via cofactor expansion along the first row (0 if not square)."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0] )
        if self.order == (2, 2):
            # Closed form for the 2x2 case: ad - bc.
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns ) )

    def lowercase_ (self : Tuple ) -> bool:
        """True when the determinant is non-zero (matrix is invertible)."""
        return bool(self.determinant() )

    def lowercase_ (self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
        """Minor: determinant of the submatrix with *row* and *column* removed."""
        UpperCAmelCase__ = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(__UpperCAmelCase ).determinant()

    def lowercase_ (self : int , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
        """Cofactor: the minor with a checkerboard sign (+ when row+column is even)."""
        if (row + column) % 2 == 0:
            return self.get_minor(__UpperCAmelCase , __UpperCAmelCase )
        return -1 * self.get_minor(__UpperCAmelCase , __UpperCAmelCase )

    def lowercase_ (self : Union[str, Any] ) -> Matrix:
        """Matrix of minors for every position."""
        return Matrix(
            [
                [self.get_minor(__UpperCAmelCase , __UpperCAmelCase ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )

    def lowercase_ (self : List[str] ) -> Matrix:
        """Matrix of cofactors (minors with alternating signs)."""
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns )
                ]
                for row in range(self.minors().num_rows )
            ] )

    def lowercase_ (self : Optional[Any] ) -> Matrix:
        """Adjugate: transpose of the cofactor matrix."""
        UpperCAmelCase__ = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(__UpperCAmelCase )

    def lowercase_ (self : List[Any] ) -> Matrix:
        """Inverse via adjugate / determinant; raises TypeError when singular."""
        UpperCAmelCase__ = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse" )
        return self.adjugate() * (1 / determinant)

    def __repr__(self : Dict ) -> str:
        """Unambiguous representation: the raw rows list."""
        return str(self.rows )

    def __str__(self : Optional[Any] ) -> str:
        """Human-readable multi-line rendering of the rows."""
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(__UpperCAmelCase ) for value in row] ) + ".]"
                    for row in self.rows
                ] )
            + "]"
        )

    def lowercase_ (self : Optional[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int | None = None ) -> None:
        """Append *row* (or insert it at *position*) after validating type and length."""
        UpperCAmelCase__ = TypeError("Row must be a list containing all ints and/or floats" )
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            raise type_error
        for value in row:
            if not isinstance(__UpperCAmelCase , (int, float) ):
                raise type_error
        if len(__UpperCAmelCase ) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix" )
        if position is None:
            self.rows.append(__UpperCAmelCase )
        else:
            UpperCAmelCase__ = self.rows[0:position] + [row] + self.rows[position:]

    def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int | None = None ) -> None:
        """Append *column* (or insert it at *position*) after validating type and length."""
        UpperCAmelCase__ = TypeError(
            "Column must be a list containing all ints and/or floats" )
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            raise type_error
        for value in column:
            if not isinstance(__UpperCAmelCase , (int, float) ):
                raise type_error
        if len(__UpperCAmelCase ) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix" )
        if position is None:
            UpperCAmelCase__ = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            UpperCAmelCase__ = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]

    def __eq__(self : Any , __UpperCAmelCase : object ) -> bool:
        """Equality by row contents; NotImplemented for non-Matrix operands."""
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self : int , __UpperCAmelCase : object ) -> bool:
        """Inverse of __eq__."""
        return not self == other

    def __neg__(self : Dict ) -> Matrix:
        """Negation as multiplication by -1."""
        return self * -1

    def __add__(self : Dict , __UpperCAmelCase : Matrix ) -> Matrix:
        """Element-wise sum; requires equal orders."""
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order" )
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )

    def __sub__(self : Optional[Any] , __UpperCAmelCase : Matrix ) -> Matrix:
        """Element-wise difference; requires equal orders."""
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order" )
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )

    def __mul__(self : Tuple , __UpperCAmelCase : Matrix | int | float ) -> Matrix:
        """Scalar multiplication (note: truncates to int) or matrix product."""
        if isinstance(__UpperCAmelCase , (int, float) ):
            # NOTE(review): int(...) truncates scalar products — intentional in
            # the original, but lossy for float scalars such as 1/determinant.
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second" )
            return Matrix(
                [
                    [Matrix.dot_product(__UpperCAmelCase , __UpperCAmelCase ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix" )

    def __pow__(self : List[Any] , __UpperCAmelCase : int ) -> Matrix:
        """Integer matrix power; negative exponents use the inverse."""
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            raise TypeError("A Matrix can only be raised to the power of an int" )
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power" )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power" )
        UpperCAmelCase__ = self
        for _ in range(other - 1 ):
            result *= self
        return result

    @classmethod
    def lowercase_ (cls : Dict , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] ) -> int:
        """Dot product of a row and a column (equal-length lists)."""
        return sum(row[i] * column[i] for i in range(len(__UpperCAmelCase ) ) )
if __name__ == "__main__":
    # Run the embedded doctests when executed as a script.
    import doctest

    doctest.testmod()
| 65 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger plus the tokenizer resource maps: expected vocab filename,
# download URLs for known checkpoints, and their max input lengths.
lowerCAmelCase__ : str =logging.get_logger(__name__)
lowerCAmelCase__ : List[str] ={'''vocab_file''': '''vocab.txt'''}
lowerCAmelCase__ : str ={
    '''vocab_file''': {
        '''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
    },
}
lowerCAmelCase__ : Dict ={
    '''openbmb/cpm-ant-10b''': 1024,
}
def __lowercase(a__) -> "collections.OrderedDict":
    """Load a vocabulary file into an OrderedDict mapping token -> index.

    Each line of the file is one token; its (0-based) line number becomes
    its id. Only the trailing newline is stripped, so tokens may contain
    spaces.

    Fixes: the obfuscated original collapsed every local assignment into
    ``__SCREAMING_SNAKE_CASE`` and referenced the undefined ``__A``/``vocab``;
    the ``-> str`` annotation was also wrong (the function returns a dict).

    :param a__: path to the vocabulary file (UTF-8, one token per line)
    :return: ordered mapping of token to integer id
    """
    vocab = collections.OrderedDict()
    with open(a__, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab
class UpperCAmelCase_(object):
    """Greedy longest-match-first subword tokenizer over an explicit vocabulary.

    Unlike BERT WordPiece there is no '##' continuation prefix: at each
    position the longest substring present in the vocab is taken; a position
    with no match emits the unknown token and advances one character.

    Fixes: the obfuscated original inherited from its own (not yet defined)
    name — a NameError at class-creation time — and collapsed every
    assignment into ``__SCREAMING_SNAKE_CASE``, leaving the instance
    attributes and all tokenize locals unresolved.
    """

    def __init__(self, _A, _A_unk="<unk>", _A_max=200):
        """Store the vocab mapping, unknown-token string, and per-word length cap.

        :param _A: mapping (token -> id); only membership is used here
        :param _A_unk: token emitted for unmatchable characters
        :param _A_max: words longer than this are mapped to a single unk token
        """
        self.vocab = _A
        self.unk_token = _A_unk
        self.max_input_chars_per_word = _A_max

    def _A(self, _A):
        """Tokenize one word into a list of vocab substrings (or unk tokens).

        :param _A: the word to split
        :return: list of subword strings
        """
        chars = list(_A)
        if len(chars) > self.max_input_chars_per_word:
            # Overlong inputs are treated as a single unknown token.
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            # Shrink the candidate window from the right until it hits vocab.
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # No prefix matched: emit unk and advance a single character.
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class UpperCAmelCase_ ( UpperCAmelCase_ ):
    """CPM-Ant tokenizer: jieba word segmentation followed by greedy subword lookup.

    NOTE(review): this file's locals were mangled (assignments collapse into
    ``__SCREAMING_SNAKE_CASE``; calls pass the undefined ``__UpperCAmelCase``),
    so attribute/variable references below name the *intended* identifiers —
    confirm against the un-mangled original.
    """

    # Tokenizer resource hooks expected by the PreTrainedTokenizer base class
    # (all mangled onto the same class-attribute name).
    UpperCamelCase__ : Tuple = VOCAB_FILES_NAMES
    UpperCamelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ : List[Any] = ['input_ids', 'attention_mask']
    UpperCamelCase__ : Dict = False

    def __init__( self , _A , _A="<d>" , _A="</d>" , _A="<s>" , _A="</s>" , _A="<pad>" , _A="<unk>" , _A="</n>" , _A="</_>" , _A="left" , **_A , ):
        """Load the vocab, remap the special space/line tokens, and build the subword tokenizer."""
        requires_backends(self , ['jieba'] )
        super().__init__(
            bod_token=__UpperCAmelCase , eod_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , line_token=__UpperCAmelCase , space_token=__UpperCAmelCase , padding_side=__UpperCAmelCase , **__UpperCAmelCase , )
        __SCREAMING_SNAKE_CASE = bod_token
        __SCREAMING_SNAKE_CASE = eod_token
        __SCREAMING_SNAKE_CASE = load_vocab(__UpperCAmelCase )
        # The special space/line tokens are re-keyed to literal " " and "\n".
        __SCREAMING_SNAKE_CASE = self.encoder[space_token]
        __SCREAMING_SNAKE_CASE = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        __SCREAMING_SNAKE_CASE = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
        __SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
        __SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )

    @property
    def _A ( self ):
        """Id of the beginning-of-document token."""
        return self.encoder[self.bod_token]

    @property
    def _A ( self ):
        """Id of the end-of-document token."""
        return self.encoder[self.eod_token]

    @property
    def _A ( self ):
        """Id of the newline token."""
        return self.encoder["\n"]

    @property
    def _A ( self ):
        """Size of the base vocabulary."""
        return len(self.encoder )

    def _A ( self ):
        """Full vocab including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def _A ( self , _A ):
        """Segment text with jieba, then split each word into subword tokens."""
        __SCREAMING_SNAKE_CASE = []
        for x in jieba.cut(__UpperCAmelCase , cut_all=__UpperCAmelCase ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(__UpperCAmelCase ) )
        return output_tokens

    def _A ( self , _A , **_A ):
        """Decode ids after dropping negatives and pad/bos/eos markers."""
        __SCREAMING_SNAKE_CASE = [i for i in token_ids if i >= 0]
        __SCREAMING_SNAKE_CASE = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(__UpperCAmelCase , **__UpperCAmelCase )

    def _A ( self , _A ):
        """True when *token* is in the base vocabulary."""
        return token in self.encoder

    def _A ( self , _A ):
        """Join tokens back into a string (no separator)."""
        return "".join(__UpperCAmelCase )

    def _A ( self , _A ):
        """Token -> id, falling back to the unk id."""
        return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )

    def _A ( self , _A ):
        """Id -> token, falling back to the unk token."""
        return self.decoder.get(__UpperCAmelCase , self.unk_token )

    def _A ( self , _A , _A = None ):
        """Write the vocabulary to disk, restoring the special space/line keys.

        Warns if token ids are not consecutive; returns the written path.
        """
        if os.path.isdir(__UpperCAmelCase ):
            __SCREAMING_SNAKE_CASE = os.path.join(
                __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        else:
            __SCREAMING_SNAKE_CASE = (filename_prefix + '-' if filename_prefix else '') + save_directory
        __SCREAMING_SNAKE_CASE = 0
        # Restore the on-disk names of the space/line tokens before writing.
        if " " in self.encoder:
            __SCREAMING_SNAKE_CASE = self.encoder[' ']
            del self.encoder[" "]
        if "\n" in self.encoder:
            __SCREAMING_SNAKE_CASE = self.encoder['\n']
            del self.encoder["\n"]
        __SCREAMING_SNAKE_CASE = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
        with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ' Please check that the vocabulary is not corrupted!' )
                    __SCREAMING_SNAKE_CASE = token_index
                writer.write(token + '\n' )
                index += 1
        return (vocab_file,)

    def _A ( self , _A , _A = None ):
        """Prefix each sequence with the BOS token id."""
        if token_ids_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a

    def _A ( self , _A , _A = None , _A = False ):
        """Return the special-tokens mask (1 for the prepended BOS positions)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
        if token_ids_a is not None:
            return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase ))
        return [1] + ([0] * len(__UpperCAmelCase ))
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
UpperCamelCase__ = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
UpperCamelCase__ = '</w>'
UpperCamelCase__ = '@@ '
def lowerCAmelCase_ ( __A ) -> set:
    """Return the set of adjacent symbol pairs in `__A` (a word as a str/tuple of symbols).

    The obfuscated body bound both `pairs` and `prev_char` to a single throwaway
    name and then read the originals, raising NameError; the bindings are restored.
    """
    pairs = set()
    prev_char = __A[0]
    for char in __A[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs


# Alias restoring the name the tokenizer class below actually calls.
get_pairs = lowerCAmelCase_
# Speech2Text2 has no max input length
# Name restored: the class attributes below reference PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# but the obfuscated source bound this dict to the generic `UpperCamelCase__`.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4}
class A ( PreTrainedTokenizer ):
    """BPE tokenizer (Speech2Text2-style) that can decode always, and encode only
    when a merges file was supplied at construction.

    Reconstructed from the obfuscated source, whose `__init__` had duplicate
    parameter names (a SyntaxError) and whose methods all shared one name.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        """Load the json vocabulary and, optionally, the BPE merges ranking."""
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
        self.do_lower_case = do_lower_case
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            # Without merges the BPE encoder cannot run; decoding still works.
            logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding="utf-8" ) as merges_handle:
                merges = merges_handle.read().split("\n" )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            # Lower rank = higher merge priority.
            self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        """Size of the decoding vocabulary."""
        return len(self.decoder )

    def get_vocab(self) -> Dict:
        """Full vocabulary including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def bpe(self, token):
        """Apply byte-pair merges to a single token; results are memoized in `self.cache`."""
        # Append the end-of-word marker to the final symbol before merging.
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            # Merge the highest-priority (lowest-rank) adjacent pair.
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        # Special-case the newline token so the marker stays attached.
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES , "" )
        # Mark sub-token boundaries with the continuation marker.
        word = word.replace(" " , BPE_TOKEN_VOCAB )
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Whitespace-split `text` and BPE-encode every token; requires merges."""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding." )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens

    def _convert_token_to_id(self, token) -> int:
        """Map a token to its id, defaulting to the unk-token id."""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self, index) -> str:
        """Map an id back to its token, defaulting to the unk token."""
        result = self.decoder.get(index , self.unk_token )
        return result

    def convert_tokens_to_string(self, tokens) -> str:
        """Join tokens and glue `@@ `-continued pieces back into whole words."""
        string = " ".join(tokens )
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB ) )
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Write vocab.json (and merges.txt when available) into `save_directory`."""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merges_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , "w" , encoding="utf-8" ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return (vocab_file, merges_file)
# (garbled file-concatenation artifact removed)
"""simple docstring"""
from typing import Any
def __lowerCAmelCase ( lowercase : List[str] ) -> list[Any]:
"""simple docstring"""
if not input_list:
return []
snake_case : List[Any] = [input_list.count(__A ) for value in input_list]
snake_case : Optional[int] = max(__A ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(__A ) if value == y} )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A ( BaseOutput ):
    """Output container for the decoder.

    Attributes:
        sample: decoded image batch from the final decoder layer.

    The obfuscated base `UpperCAmelCase_` was undefined; `BaseOutput` is the name
    imported by this module.
    """

    sample: torch.FloatTensor
class A ( nn.Module ):
    """VAE encoder: conv-in → down blocks → mid block → GroupNorm/SiLU → conv-out.

    Reconstructed: the obfuscated `__init__` had duplicate parameter names (a
    SyntaxError) and never assigned the `self.*` modules that `forward` reads.
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(6_4,),
        layers_per_block=2,
        norm_num_groups=3_2,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Convad(
            in_channels , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        self.mid_block = None
        self.down_blocks = nn.ModuleList([] )
        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            down_block = get_down_block(
                down_block_type , num_layers=self.layers_per_block , in_channels=input_channel , out_channels=output_channel , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=act_fn , resnet_groups=norm_num_groups , attention_head_dim=output_channel , temb_channels=None , )
            self.down_blocks.append(down_block )
        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=act_fn , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=norm_num_groups , temb_channels=None , )
        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=norm_num_groups , eps=1E-6 )
        self.conv_act = nn.SiLU()
        # double_z doubles the channels so mean and logvar can be split downstream.
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Convad(block_out_channels[-1] , conv_out_channels , 3 , padding=1 )
        self.gradient_checkpointing = False

    def forward(self, x):
        """Encode `x`; uses activation checkpointing during training when enabled."""
        sample = x
        sample = self.conv_in(sample )
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module ):
                def custom_forward(*inputs ):
                    return module(*inputs )

                return custom_forward

            # down
            if is_torch_version(">=" , "1.11.0" ):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block ) , sample , use_reentrant=False )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , sample , use_reentrant=False )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block ) , sample )
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , sample )
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample )
            # middle
            sample = self.mid_block(sample )
        # post-process
        sample = self.conv_norm_out(sample )
        sample = self.conv_act(sample )
        sample = self.conv_out(sample )
        return sample
class A ( nn.Module ):
    """VAE decoder: conv-in → mid block → up blocks → norm/SiLU → conv-out.

    Supports "group" (GroupNorm) or "spatial" (SpatialNorm) output normalization.
    Reconstructed: the obfuscated `__init__` had duplicate parameter names and the
    `self.*` modules read by `forward` were never assigned.
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(6_4,),
        layers_per_block=2,
        norm_num_groups=3_2,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Convad(
            in_channels , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        # Spatial norm conditions on the latent embedding channels.
        temb_channels = in_channels if norm_type == "spatial" else None
        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=act_fn , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=norm_num_groups , temb_channels=temb_channels , )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            up_block = get_up_block(
                up_block_type , num_layers=self.layers_per_block + 1 , in_channels=prev_output_channel , out_channels=output_channel , prev_output_channel=None , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=act_fn , resnet_groups=norm_num_groups , attention_head_dim=output_channel , temb_channels=temb_channels , resnet_time_scale_shift=norm_type , )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel
        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0] , temb_channels )
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=norm_num_groups , eps=1E-6 )
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Convad(block_out_channels[0] , out_channels , 3 , padding=1 )
        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        """Decode latent `z`, optionally conditioned on `latent_embeds` (spatial norm)."""
        sample = z
        sample = self.conv_in(sample )
        # Cast mid-block output to the up-blocks' parameter dtype (mixed precision).
        upscale_dtype = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module ):
                def custom_forward(*inputs ):
                    return module(*inputs )

                return custom_forward

            if is_torch_version(">=" , "1.11.0" ):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , sample , latent_embeds , use_reentrant=False )
                sample = sample.to(upscale_dtype )
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block ) , sample , latent_embeds , use_reentrant=False )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , sample , latent_embeds )
                sample = sample.to(upscale_dtype )
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block ) , sample , latent_embeds )
        else:
            # middle
            sample = self.mid_block(sample , latent_embeds )
            sample = sample.to(upscale_dtype )
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample , latent_embeds )
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample )
        else:
            sample = self.conv_norm_out(sample , latent_embeds )
        sample = self.conv_act(sample )
        sample = self.conv_out(sample )
        return sample
class A ( nn.Module ):
    """Vector quantizer: snaps each latent vector to its nearest codebook entry.

    Supports an optional index `remap` (np file of used codes) and a `legacy`
    loss-weighting variant.  Reconstructed from obfuscated source with duplicate
    parameter names and unbound attribute writes.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta  # commitment-loss weight
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                # Reserve one extra index for unknown codes.
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
                f"""Using {self.unknown_index} for unknown indices.""" )
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        """Map raw codebook indices to positions in the `used` subset."""
        ishape = inds.shape
        assert len(ishape ) > 1
        inds = inds.reshape(ishape[0] , -1 )
        used = self.used.to(inds )
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1 )
        unknown = match.sum(2 ) < 1  # indices not present in `used`
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape )

    def unmap_to_all(self, inds):
        """Inverse of `remap_to_used`: positions in `used` back to raw indices."""
        ishape = inds.shape
        assert len(ishape ) > 1
        inds = inds.reshape(ishape[0] , -1 )
        used = self.used.to(inds )
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , inds )
        return back.reshape(ishape )

    def forward(self, z):
        """Quantize `z` (N,C,H,W); returns (z_q, loss, (perplexity, encodings, indices))."""
        z = z.permute(0 , 2 , 3 , 1 ).contiguous()
        z_flattened = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened , self.embedding.weight ) , dim=1 )
        z_q = self.embedding(min_encoding_indices ).view(z.shape )
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            loss = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0] , -1 )  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices )
            min_encoding_indices = min_encoding_indices.reshape(-1 , 1 )  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        """Look up codebook vectors for `indices`; reshape to `shape` (N,H,W,C) if given."""
        if self.remap is not None:
            indices = indices.reshape(shape[0] , -1 )  # add batch axis
            indices = self.unmap_to_all(indices )
            indices = indices.reshape(-1 )  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices )
        if shape is not None:
            z_q = z_q.view(shape )
            # reshape back to match original input shape
            z_q = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q
class A ( object ):
    """Diagonal Gaussian over latents, parameterized by `parameters` = concat(mean, logvar)
    along dim 1.  `deterministic=True` zeroes the variance so sampling returns the mean.

    Reconstructed: the obfuscated `__init__` had duplicate parameter names and an
    undefined base class.
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters , 2 , dim=1 )
        # Clamp logvar for numerical stability of exp().
        self.logvar = torch.clamp(self.logvar , -30.0 , 20.0 )
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar )
        self.var = torch.exp(self.logvar )
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )

    def sample(self, generator=None) -> torch.FloatTensor:
        """Draw a reparameterized sample: mean + std * eps."""
        sample = randn_tensor(
            self.mean.shape , generator=generator , device=self.parameters.device , dtype=self.parameters.dtype )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        """KL divergence to a standard normal, or to `other` when given."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )

    def nll(self, sample, dims=[1, 2, 3]):
        """Negative log-likelihood of `sample`; `dims` is read-only, never mutated."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        logtwopi = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=dims )

    def mode(self):
        """Distribution mode (= mean for a Gaussian)."""
        return self.mean
# (garbled file-concatenation artifact removed)
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=DummyObject):
    """Placeholder that raises an informative error unless `torch` and `scipy` are installed.

    Fixed: the obfuscated metaclass name `UpperCAmelCase_` was undefined (the module
    imports `DummyObject`), and both classmethods shared one name so only the last
    survived; the standard dummy-object hooks are restored.
    """

    # Backends that must be available before the real class can be used.
    _backends = ['torch', 'scipy']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'scipy'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'scipy'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'scipy'] )
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config

if config.PY_VERSION < version.parse('3.8'):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata
def lowerCAmelCase_ ( key, default=False ) -> Any:
    """Read a boolean flag from environment variable `key`.

    Returns `default` unchanged when the variable is unset; otherwise parses the
    value like `distutils.util.strtobool` (returns 1 or 0).  The original had
    duplicate parameter names (a SyntaxError) and returned an unbound `_value`.
    A local parser replaces `strtobool` since distutils was removed in Python 3.12.
    """
    def _strtobool(val):
        # Same truth table as distutils.util.strtobool.
        val = val.lower()
        if val in ("y", "yes", "t", "true", "on", "1"):
            return 1
        if val in ("n", "no", "f", "false", "off", "0"):
            return 0
        raise ValueError(f"invalid truth value {val!r}")

    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = _strtobool(_value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""" )
    return _value


# Alias restoring the name the module-level flags below actually call.
parse_flag_from_env = lowerCAmelCase_
# Category flags parsed once at import time.  Names restored: the obfuscated
# source bound every constant to `UpperCamelCase__`, while the skip decorators
# below read `_run_slow_tests` etc.
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)

# Compression
require_lza = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
require_pyazr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
    reason='test requires apache-beam and a compatible dill version',
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('0.3.2'),
    reason='test requires dill>0.3.2 for cloudpickle compatibility',
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == 'win32',
    reason='test should not be run on Windows',
)
def lowerCAmelCase_ ( test_case ) -> Any:
    """Skip `test_case` unless faiss is importable.

    NOTE(review): every decorator in this run shares the obfuscated name
    `lowerCAmelCase_`, so at module scope only the last binding survives; the
    per-function fix here is that the decorated/returned value is now actually
    bound (the original assigned to a throwaway name and returned an unbound
    `test_case`).
    """
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss" )(test_case )
    return test_case


def lowerCAmelCase_ ( test_case ) -> Optional[Any]:
    """Skip `test_case` unless regex is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex" )(test_case )
    return test_case


def lowerCAmelCase_ ( test_case ) -> List[str]:
    """Skip `test_case` unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch" )(test_case )
    return test_case


def lowerCAmelCase_ ( test_case ) -> List[Any]:
    """Skip `test_case` unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy" )(test_case )
    return test_case


def lowerCAmelCase_ ( test_case ) -> List[str]:
    """Skip `test_case` unless PyTorch is available."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch" )(test_case )
    return test_case


def lowerCAmelCase_ ( test_case ) -> Union[str, Any]:
    """Skip `test_case` unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow" )(test_case )
    return test_case


def lowerCAmelCase_ ( test_case ) -> Any:
    """Skip `test_case` unless JAX is available."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX" )(test_case )
    return test_case


def lowerCAmelCase_ ( test_case ) -> int:
    """Skip `test_case` unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow" )(test_case )
    return test_case


def lowerCAmelCase_ ( test_case ) -> Tuple:
    """Skip `test_case` unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers" )(test_case )
    else:
        return test_case


def lowerCAmelCase_ ( test_case ) -> Dict:
    """Skip `test_case` unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken" )(test_case )
    else:
        return test_case


def lowerCAmelCase_ ( test_case ) -> Optional[Any]:
    """Skip `test_case` unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy" )(test_case )
    else:
        return test_case


def lowerCAmelCase_ ( model_name ) -> Optional[int]:
    """Decorator factory: skip the test unless the given spacy model can be loaded."""
    def _require_spacy_model(test_case ):
        try:
            import spacy  # noqa F401

            spacy.load(model_name )
        except ImportError:
            return unittest.skip("test requires spacy" )(test_case )
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model_name ) )(test_case )
        else:
            return test_case

    return _require_spacy_model


def lowerCAmelCase_ ( test_case ) -> Optional[Any]:
    """Skip `test_case` unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark" )(test_case )
    else:
        return test_case


def lowerCAmelCase_ ( test_case ) -> Tuple:
    """Skip `test_case` unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark" )(test_case )
    else:
        return test_case
def lowerCAmelCase_ ( test_case ) -> Optional[int]:
    """Skip `test_case` unless RUN_SLOW was enabled (module flag `_run_slow_tests`)."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow" )(test_case )
    return test_case


def lowerCAmelCase_ ( test_case ) -> List[Any]:
    """Skip `test_case` unless RUN_LOCAL was enabled."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local" )(test_case )
    return test_case


def lowerCAmelCase_ ( test_case ) -> Optional[Any]:
    """Skip `test_case` unless RUN_PACKAGED was enabled."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged" )(test_case )
    return test_case


def lowerCAmelCase_ ( test_case ) -> Any:
    """Skip `test_case` unless RUN_REMOTE was enabled."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote" )(test_case )
    return test_case
def lowerCAmelCase_ ( *__A ) -> Optional[int]:
'''simple docstring'''
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(__A ) and name.startswith("test" ):
for decorator in decorators:
UpperCAmelCase__ = decorator(__A )
setattr(cls, __A, __A )
return cls
return decorate
class RequestWouldHangIndefinitelyError ( Exception ):
    """Raised by offline simulation when a request has no timeout set."""
    pass


class OfflineSimulationMode ( Enum ):
    """How network failure is simulated by the `offline` context manager.

    Names restored: both classes were named `A` in the obfuscated source while
    the context manager below references these identifiers.
    """
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def lowerCAmelCase_ ( mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16 ) -> List[str]:
    """Context manager simulating an offline environment in the chosen `mode`.

    Reconstructed: the obfuscated body lost every local binding, leaving the
    patched callables referencing unbound names.
    """
    online_request = requests.Session().request

    def timeout_request(self, method, url, **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout" ) is None:
            raise RequestWouldHangIndefinitelyError(
                f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"""OfflineMock[{url}]""" ),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs ):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request )

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True ):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def lowerCAmelCase_ ( *args, **kwargs ) -> str:
    """Run the enclosed block inside a fresh temporary working directory,
    restoring the previous cwd afterwards.

    Fixed: the original declared `*__A, **__A` (duplicate names — a SyntaxError)
    and lost the saved working-directory binding.
    """
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args, **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            # Always return to the original directory, even on error.
            os.chdir(original_working_dir )
@contextmanager
def lowerCAmelCase_ ( ) -> Optional[Any]:
    """Assert that Arrow-allocated memory strictly increases inside the block.

    Fixed: the baseline measurement was bound to a throwaway name while the
    assertion read `previous_allocated_memory`.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def lowerCAmelCase_ ( ) -> List[str]:
    """Assert that Arrow-allocated memory does not increase inside the block."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowerCAmelCase_ ( rng_a, rng_a_b ) -> bool:
    """Return True when two RNGs produce the same next 10 integers in [0, 100).

    Deep-copies both generators so neither argument's state is advanced.
    Fixed: the original declared two parameters with the same name (SyntaxError).
    """
    return deepcopy(rng_a ).integers(0, 100, 10 ).tolist() == deepcopy(rng_a_b ).integers(0, 100, 10 ).tolist()
def lowerCAmelCase_ ( func ) -> Optional[int]:
    """Decorator that turns server-side 500/502 HTTPErrors into pytest xfails.

    Fixed: the wrapper declared `(__A, *__A, **__A)` (duplicate names — a
    SyntaxError) and called the wrong object.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs ):
        try:
            return func(*args, **kwargs )
        except HTTPError as err:
            if str(err ).startswith("500" ) or str(err ).startswith("502" ):
                pytest.xfail(str(err ) )
            raise err

    return decorator.decorator(_wrapper, func )
class A :
def __init__(self : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = returncode
UpperCAmelCase__ = stdout
UpperCAmelCase__ = stderr
async def lowerCAmelCase_ ( stream, callback ) -> Optional[int]:
    """Read lines from an async `stream` until EOF, passing each to `callback`.

    Fixed: the original declared two parameters with the same name (SyntaxError)
    and read unbound `stream`/`line` names.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            # Empty read marks EOF.
            break


# Alias restoring the name used by `_stream_subprocess` below.
_read_stream = lowerCAmelCase_
async def lowerCAmelCase_ ( cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False ) -> _RunOutput:
    """Run `cmd` asynchronously, teeing stdout/stderr line-by-line.

    Returns a `_RunOutput` with the exit code and the captured output lines.
    Reconstructed: all local bindings were lost in the obfuscated source; also
    wraps the readers in tasks because `asyncio.wait` rejects bare coroutines
    on Python 3.11+.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label="" ):
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label, line, file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:" ) ) ),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:" ) ) ),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err )


# Alias restoring the name called by `execute_subprocess_async` below.
_stream_subprocess = lowerCAmelCase_
def lowerCAmelCase_(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Synchronously run *cmd* via the async streamer and validate its outcome.

    Args:
        cmd: argv list for the child process.
        env / stdin / timeout / quiet / echo: forwarded to ``_stream_subprocess``.

    Returns:
        _RunOutput of the completed process.

    Raises:
        RuntimeError: if the process exits non-zero, or produces no output at all
            (the latter guards tests that rely on the remote side doing the checking).
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""")
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"""'{cmd_str}' produced no output.""")
    return result
def lowerCAmelCase_() -> int:
    """Return this pytest-xdist worker's numeric id (0 when not under xdist).

    pytest-xdist names workers ``gw0``, ``gw1``, ... in the
    ``PYTEST_XDIST_WORKER`` environment variable; strip the ``gw`` prefix
    and parse the remainder.
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)
def lowerCAmelCase_() -> int:
    """Return a torch.distributed port unique to this pytest-xdist worker.

    Offsets the conventional base port 29500 by the worker id so that tests
    running in parallel workers do not collide on the same rendezvous port.
    """
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 65 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase(ProcessorMixin):
    """Processor pairing a CLIP image processor with an XLM-Roberta tokenizer.

    Wraps both components behind a single ``__call__`` that tokenizes text
    and/or preprocesses images, returning a combined ``BatchEncoding``.
    """

    # The original class bound all three attributes to the same name
    # (`UpperCamelCase`), so they overwrote each other; restored to the
    # distinct names the ProcessorMixin contract reads.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Backward-compat shim: accept the deprecated `feature_extractor` kwarg.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            # Attach the pixel values to the text encoding (the original block
            # computed them but never stored them on `encoding`).
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, order-preserving."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
def lowerCAmelCase_(stra: str, strb: str) -> float:
    """Compute the Jaro-Winkler similarity between two strings.

    Returns a float in [0, 1]; 1.0 means the strings are identical. The
    Winkler adjustment boosts the plain Jaro score by up to 4 matching
    prefix characters.

    Args:
        stra: first string.
        strb: second string.

    Returns:
        float: the Jaro-Winkler similarity.
    """

    def get_matched_characters(_stra: str, _strb: str) -> str:
        # Characters of _stra that also appear in _strb within the Jaro window.
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                # Blank out the consumed character so it cannot match twice.
                _strb = f"""{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"""
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transposition: half the number of matched characters out of order
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    # Run the embedded doctests, then print a demo similarity score.
    import doctest

    doctest.testmod()
    # NOTE(review): the implementation above is bound to `lowerCAmelCase_`,
    # not `jaro_winkler` -- as written this call raises NameError; confirm
    # the intended public name before shipping.
    print(jaro_winkler('hello', 'world'))
| 65 | 0 |
"""simple docstring"""
def _lowerCamelCase( a , a ):
def get_matched_characters(a , a ) -> str:
__a = []
__a = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
__a = int(max(0 , i - limit ) )
__a = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(__A )
__a = F"{_stra[0:_stra.index(__A )]} {_stra[_stra.index(__A ) + 1:]}"
return "".join(__A )
# matching characters
__a = get_matched_characters(__A , __A )
__a = get_matched_characters(__A , __A )
__a = len(__A )
# transposition
__a = (
len([(ca, ca) for ca, ca in zip(__A , __A ) if ca != ca] ) // 2
)
if not match_count:
__a = 0.0
else:
__a = (
1
/ 3
* (
match_count / len(__A )
+ match_count / len(__A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
__a = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    # Run the embedded doctests, then print a demo similarity score.
    import doctest

    doctest.testmod()
    # NOTE(review): the implementation above is bound to `_lowerCamelCase`,
    # not `jaro_winkler`; as written this call raises NameError.
    print(jaro_winkler("""hello""", """world"""))
| 261 | def lowerCAmelCase_ ( __A, __A ) -> None:
'''simple docstring'''
UpperCAmelCase__ = len(__A )
print("The following activities are selected:" )
# The first activity is always selected
UpperCAmelCase__ = 0
print(__A, end="," )
# Consider rest of the activities
for j in range(__A ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(__A, end="," )
UpperCAmelCase__ = j
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): both assignments target the same placeholder name, and the
    # call below references `print_max_activities`/`start`/`finish`, none of
    # which are defined above -- this demo raises NameError as written.
    UpperCamelCase__ = [1, 3, 0, 5, 8, 5]
    UpperCamelCase__ = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
| 65 | 0 |
"""simple docstring"""
import numpy as np
import datasets
_snake_case = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_snake_case = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_snake_case = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
    """Mahalanobis-distance metric: distance of each datapoint in X from a reference distribution."""

    def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
        """Declare the metric's metadata and input feature schema (2D float sequences)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""" ) , id="""X""" ),
                } ) , )

    # NOTE(review): this method is machine-mangled -- the two data parameters share
    # one name (a SyntaxError) and the body reads `X` / `reference_distribution`
    # while assigning to placeholder targets. The intended computation (per the
    # docstring constants above) is: center X on the reference mean, invert the
    # reference covariance (pseudo-inverse on singular matrices), and return the
    # diagonal of X_minus_mu @ inv_cov @ X_minus_mu.T. Restore real names before use.
    def _lowercase ( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
        _a : List[Any] = np.array(__UpperCAmelCase )
        _a : Tuple = np.array(__UpperCAmelCase )
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError("""Expected `X` to be a 2D vector""" )
        if len(reference_distribution.shape ) != 2:
            raise ValueError("""Expected `reference_distribution` to be a 2D vector""" )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                """Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" )
        # Get mahalanobis distance for each prediction
        _a : Any = X - np.mean(__UpperCAmelCase )
        _a : Optional[int] = np.cov(reference_distribution.T )
        try:
            _a : Any = np.linalg.inv(__UpperCAmelCase )
        except np.linalg.LinAlgError:
            # Singular covariance: fall back to the Moore-Penrose pseudo-inverse.
            _a : Any = np.linalg.pinv(__UpperCAmelCase )
        _a : Optional[int] = np.dot(__UpperCAmelCase , __UpperCAmelCase )
        _a : Dict = np.dot(__UpperCAmelCase , X_minus_mu.T ).diagonal()
        return {"mahalanobis": mahal_dist}
| 294 | import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
UpperCamelCase__ = 'base_with_context'
def lowerCAmelCase_ ( __A, __A ) -> int:
    """Port token-encoder weights from a T5X checkpoint dict into the torch notes encoder.

    NOTE(review): machine-mangled block -- both parameters are named ``__A``
    (a SyntaxError) while the body reads ``weights`` and ``model``, and every
    assignment target was rewritten to ``UpperCAmelCase__``. The intended flow
    (checkpoint dict -> named ``model`` parameters, per layer) must be restored
    against the upstream music_spectrogram_diffusion conversion script.
    """
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
    # Positional embedding is loaded without gradients (requires_grad flag mangled to __A).
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
    for lyr_num, lyr in enumerate(model.encoders ):
        UpperCAmelCase__ = weights[f"""layers_{lyr_num}"""]
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = ly_weight["attention"]
        # JAX kernels are transposed relative to torch Linear weights, hence the .T.
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
    return model
def lowerCAmelCase_ ( __A, __A ) -> Tuple:
    """Port continuous-encoder weights from a T5X checkpoint dict into the torch model.

    NOTE(review): machine-mangled like the loader above -- duplicate ``__A``
    parameters (SyntaxError), body reads ``weights``/``model``, assignment
    targets collapsed to ``UpperCAmelCase__``; restore against upstream.
    """
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
    for lyr_num, lyr in enumerate(model.encoders ):
        UpperCAmelCase__ = weights[f"""layers_{lyr_num}"""]
        UpperCAmelCase__ = ly_weight["attention"]
        # JAX kernels are transposed relative to torch Linear weights, hence the .T.
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
    return model
def lowerCAmelCase_ ( __A, __A ) -> List[Any]:
    """Port FiLM-decoder weights (time embeddings, self/cross attention, MLP) into the torch decoder.

    NOTE(review): machine-mangled -- duplicate ``__A`` parameters (SyntaxError),
    body reads ``weights``/``model``, assignment targets collapsed to
    ``UpperCAmelCase__``; restore against upstream before running.
    """
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        UpperCAmelCase__ = weights[f"""layers_{lyr_num}"""]
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
        # Self-attention projections (JAX kernels transposed to torch layout).
        UpperCAmelCase__ = ly_weight["self_attention"]
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        # Cross-attention projections.
        UpperCAmelCase__ = ly_weight["MultiHeadDotProductAttention_0"]
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
    return model
def lowerCAmelCase_ ( __A ) -> int:
    """Convert a T5X music-spectrogram-diffusion checkpoint to a diffusers SpectrogramDiffusionPipeline.

    NOTE(review): machine-mangled -- the parameter is ``__A`` but the body reads
    ``args`` (argparse namespace with checkpoint_path/output_path/save), and
    assignment targets were collapsed to ``UpperCAmelCase__``; restore the
    intermediate names (checkpoint dict, gin config, encoders, decoder, pipeline)
    against the upstream conversion script before running.
    """
    UpperCAmelCase__ = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    UpperCAmelCase__ = jnp.tree_util.tree_map(onp.array, __A )
    # Gin overrides applied on top of the training config when parsing.
    UpperCAmelCase__ = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    UpperCAmelCase__ = os.path.join(args.checkpoint_path, "..", "config.gin" )
    UpperCAmelCase__ = inference.parse_training_gin_file(__A, __A )
    UpperCAmelCase__ = inference.InferenceModel(args.checkpoint_path, __A )
    UpperCAmelCase__ = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large" )
    # Instantiate the three torch sub-models with shapes mirrored from the T5X config.
    UpperCAmelCase__ = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    UpperCAmelCase__ = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    UpperCAmelCase__ = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    # Copy the checkpoint weights into each sub-model, then assemble the pipeline.
    UpperCAmelCase__ = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], __A )
    UpperCAmelCase__ = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], __A )
    UpperCAmelCase__ = load_decoder(ta_checkpoint["target"]["decoder"], __A )
    UpperCAmelCase__ = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
    UpperCAmelCase__ = SpectrogramDiffusionPipeline(
        notes_encoder=__A, continuous_encoder=__A, decoder=__A, scheduler=__A, melgan=__A, )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    # CLI entry point: parse conversion arguments and run the converter.
    UpperCamelCase__ = argparse.ArgumentParser()
    parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument(
        '--checkpoint_path',
        # NOTE(review): `MODEL` is not defined in this file (the constant above is
        # bound to a placeholder name); confirm the intended default path.
        default=f'''{MODEL}/checkpoint_500000''',
        type=str,
        required=False,
        help='Path to the original jax model checkpoint.',
    )
    UpperCamelCase__ = parser.parse_args()
    # NOTE(review): `parser`/`args`/`main` are referenced but the assignments above
    # target a placeholder name and no `main` is defined -- restore real names.
    main(args)
| 65 | 0 |
"""simple docstring"""
from __future__ import annotations
A_ = '''Muhammad Umer Farooq'''
A_ = '''MIT'''
A_ = '''1.0.0'''
A_ = '''Muhammad Umer Farooq'''
A_ = '''contact@muhammadumerfarooq.me'''
A_ = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class lowercase( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self: Optional[int], a_: str ):
'''simple docstring'''
super().__init__()
_snake_case : List[Any] = []
_snake_case : List[str] = domain
def UpperCamelCase_ ( self: List[Any], a_: str, a_: list[tuple[str, str | None]] ):
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
_snake_case : List[Any] = parse.urljoin(self.domain, __UpperCAmelCase )
self.urls.append(__UpperCAmelCase )
def UpperCAmelCase__(url: str) -> str:
    """Return the registrable domain (last two labels) of *url*'s host.

    E.g. ``https://docs.github.com/x`` -> ``github.com``.
    """
    # The original body referenced an undefined placeholder name; the parameter
    # is now threaded through to the sub-domain helper.
    return ".".join(get_sub_domain_name(url).split(".")[-2:])
def UpperCAmelCase__(url: str) -> str:
    """Return the network location (host[:port]) component of *url*."""
    # The original body referenced an undefined placeholder name instead of
    # the parameter; fixed to parse the given URL.
    return parse.urlparse(url).netloc
def UpperCAmelCase__ (snake_case__ : List[str] = "https://github.com" ):
    """Crawl *url*, follow every link found on it, and harvest e-mail addresses.

    NOTE(review): machine-mangled -- the body reads `domain`, `parser`, `r`,
    `read`, `emails`, `valid_emails` and calls `get_domain_name`/`Parser`,
    while assignments target a placeholder and `__A` is undefined; restore the
    intended names before running. Network access via `requests` throughout.
    """
    _snake_case : Dict = get_domain_name(__A )
    # Initialize the parser
    _snake_case : List[str] = Parser(__A )
    try:
        # Open URL
        _snake_case : Union[str, Any] = requests.get(__A )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        _snake_case : Union[str, Any] = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                _snake_case : List[Any] = requests.get(__A )
                # Get the valid email.
                _snake_case : Dict = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(__A )
            # NOTE(review): catching ValueError here silently skips pages; network
            # errors from requests are *not* ValueError -- confirm intent.
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(__A )
if __name__ == "__main__":
    # Demo: crawl github.com and print all harvested addresses.
    # NOTE(review): the crawler above is bound to a placeholder name and the
    # assignment target here is `A_`, so `emails_from_url`/`emails` are
    # undefined as written -- this demo raises NameError.
    A_ = emails_from_url('''https://github.com''')
    print(F'''{len(emails)} emails found:''')
    print('''\n'''.join(sorted(emails)))
| 64 | import math
def lowerCAmelCase_(num) -> bool:
    """Return True iff *num* is a perfect square, via floating-point sqrt.

    The original declared parameter ``__A`` while the body read ``num``
    (NameError); the parameter name is restored. Note: float sqrt can
    misclassify very large integers -- ``math.isqrt`` would be exact.
    """
    return math.sqrt(num) * math.sqrt(num) == num
def lowerCAmelCase_(n) -> bool:
    """Return True iff *n* is a perfect square, via integer binary search.

    Searches mid in [0, n] for mid*mid == n; exact for arbitrarily large
    non-negative integers. (The original declared parameter ``__A`` while
    the body read ``n`` -- the parameter name is restored.)
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
    # Run the embedded doctests when executed as a script.
    import doctest

    doctest.testmod()
| 65 | 0 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_A : Dict = logging.get_logger(__name__)
_A : str = """https://openaipublic.azureedge.net/jukebox/models/"""
_A : Optional[Any] = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def __magic_name__ ( __snake_case : str ) -> Any:
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
lowercase : List[str] = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
lowercase : Any = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
lowercase : List[str] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
lowercase : Tuple = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
lowercase : Any = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
lowercase : Tuple = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowercase : List[Any] = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
lowercase : Union[str, Any] = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def __magic_name__ ( __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Tuple , __snake_case : List[str] ) -> Dict:
    """Rewrite an entire Jukebox state dict's keys into the transformers layout.

    NOTE(review): machine-mangled -- the four parameters share one name
    (a SyntaxError) while the body reads ``state_dict``, ``model_state_dict``
    and ``key_prefix``, and rewrite results are assigned to a throwaway
    placeholder; restore the intended names before running. Structure: a
    battery of compiled regexes classifies each key (encoder / decoder /
    conditioner, conv-in / resnet / proj), maps sub-block indices, applies
    ``replace_key``, then validates the renamed key against the model's
    state dict (existence and shape) before storing it.
    """
    lowercase : int = {}
    import re

    # Key-classification patterns for encoder, decoder and conditioner sub-modules.
    lowercase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
    lowercase : str = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    lowercase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
    lowercase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
    lowercase : List[str] = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    lowercase : str = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
    lowercase : Optional[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
    lowercase : Dict = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    lowercase : List[str] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(__A ):
            lowercase : Optional[Any] = re_encoder_block_conv_in.match(__A )
            lowercase : Optional[int] = regex_match.groups()
            lowercase : int = int(groups[2] ) * 2 + int(groups[3] )
            lowercase : List[str] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
            lowercase : Optional[int] = re_encoder_block_conv_in.sub(__A , __A )
        elif re_encoder_block_resnet.fullmatch(__A ):
            lowercase : int = re_encoder_block_resnet.match(__A )
            lowercase : Dict = regex_match.groups()
            lowercase : List[str] = int(groups[2] ) * 2 + int(groups[3] )
            lowercase : List[Any] = {"1": 1, "3": 2}[groups[-2]]
            lowercase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
            lowercase : Dict = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            lowercase : str = prefix + resnet_block
            lowercase : Any = re_encoder_block_resnet.sub(__A , __A )
        elif re_encoder_block_proj_out.fullmatch(__A ):
            lowercase : Tuple = re_encoder_block_proj_out.match(__A )
            lowercase : List[str] = regex_match.groups()
            lowercase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
            lowercase : str = re_encoder_block_proj_out.sub(__A , __A )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(__A ):
            lowercase : Dict = re_decoder_block_conv_out.match(__A )
            lowercase : Any = regex_match.groups()
            lowercase : int = int(groups[2] ) * 2 + int(groups[3] ) - 2
            lowercase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
            lowercase : Optional[int] = re_decoder_block_conv_out.sub(__A , __A )
        elif re_decoder_block_resnet.fullmatch(__A ):
            lowercase : str = re_decoder_block_resnet.match(__A )
            lowercase : List[str] = regex_match.groups()
            lowercase : int = int(groups[2] ) * 2 + int(groups[3] ) - 2
            lowercase : Any = {"1": 1, "3": 2}[groups[-2]]
            lowercase : Union[str, Any] = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
            lowercase : str = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            lowercase : Optional[Any] = prefix + resnet_block
            lowercase : Optional[int] = re_decoder_block_resnet.sub(__A , __A )
        elif re_decoder_block_proj_in.fullmatch(__A ):
            lowercase : Union[str, Any] = re_decoder_block_proj_in.match(__A )
            lowercase : List[str] = regex_match.groups()
            lowercase : Any = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
            lowercase : Tuple = re_decoder_block_proj_in.sub(__A , __A )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(__A ):
            lowercase : Union[str, Any] = re_prior_cond_conv_out.match(__A )
            lowercase : int = regex_match.groups()
            lowercase : Tuple = int(groups[1] ) * 2 + int(groups[2] ) - 2
            lowercase : str = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
            lowercase : Optional[Any] = re_prior_cond_conv_out.sub(__A , __A )
        elif re_prior_cond_resnet.fullmatch(__A ):
            lowercase : List[str] = re_prior_cond_resnet.match(__A )
            lowercase : Optional[int] = regex_match.groups()
            lowercase : Tuple = int(groups[1] ) * 2 + int(groups[2] ) - 2
            lowercase : Any = {"1": 1, "3": 2}[groups[-2]]
            lowercase : List[str] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
            lowercase : Tuple = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            lowercase : str = prefix + resnet_block
            lowercase : Tuple = re_prior_cond_resnet.sub(__A , __A )
        elif re_prior_cond_proj_in.fullmatch(__A ):
            lowercase : Dict = re_prior_cond_proj_in.match(__A )
            lowercase : Tuple = regex_match.groups()
            lowercase : Union[str, Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
            lowercase : Tuple = re_prior_cond_proj_in.sub(__A , __A )
        # keep original key
        else:
            lowercase : Optional[int] = original_key
        lowercase : Dict = replace_key(__A )
        if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
            print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle missmatched shape
        elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
            lowercase : Optional[int] = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
            lowercase : Optional[int] = original_key
        lowercase : Dict = original_key
        lowercase : Union[str, Any] = value
    return new_dict
@torch.no_grad()
def __magic_name__ ( __snake_case : List[Any]=None , __snake_case : Optional[Any]=None ) -> Optional[Any]:
    """Download, rename and load the OpenAI Jukebox checkpoints into a JukeboxModel.

    NOTE(review): machine-mangled -- both parameters are ``__snake_case``
    (a SyntaxError) while the body reads ``model_name`` and
    ``pytorch_dump_folder_path``, and intermediates were collapsed to a
    placeholder; restore the intended names before running. Flow: fetch each
    checkpoint file if missing, remap keys via ``fix_jukebox_keys``, load the
    vqvae + prior weights, then persist the model and the key mapping.
    """
    # Download any checkpoint shard not already present on disk.
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
            lowercase : Union[str, Any] = requests.get(f"""{PREFIX}{file}""" , allow_redirects=__A )
            os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=__A )
            open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , "wb" ).write(r.content )
    lowercase : Optional[int] = MODEL_MAPPING[model_name.split("/" )[-1]]
    lowercase : Optional[Any] = JukeboxConfig.from_pretrained(__A )
    lowercase : Any = JukeboxModel(__A )
    lowercase : Dict = []
    lowercase : Optional[Any] = {}
    # Remap each shard's keys to the transformers naming scheme.
    for i, dict_name in enumerate(__A ):
        lowercase : Dict = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["model"]
        lowercase : List[Any] = {}
        for k in old_dic.keys():
            if k.endswith(".b" ):
                lowercase : Optional[Any] = old_dic[k]
            elif k.endswith(".w" ):
                lowercase : int = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                lowercase : Any = old_dic[k]
            else:
                lowercase : Any = old_dic[k]
        lowercase : Union[str, Any] = "vqvae" if i == 0 else f"""priors.{3 - i}"""
        lowercase : Tuple = fix_jukebox_keys(__A , model.state_dict() , __A , __A )
        weight_dict.append(__A )
    # Shard 0 is the vqvae; the remaining shards are priors in reverse order.
    lowercase : Any = weight_dict.pop(0 )
    model.vqvae.load_state_dict(__A )
    for i in range(len(__A ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(__A ).mkdir(exist_ok=__A )
    with open(f"""{pytorch_dump_folder_path}/mapping.json""" , "w" ) as txtfile:
        json.dump(__A , __A )
    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(__A )
    return weight_dict
if __name__ == "__main__":
    # CLI entry point: choose the Jukebox variant and output directory, then convert.
    _A : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""jukebox-5b-lyrics""",
        type=str,
        help="""Name of the model you\'d like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""jukebox-5b-lyrics-converted""",
        type=str,
        help="""Path to the output PyTorch model directory.""",
    )
    # NOTE(review): the assignments above target placeholder names while
    # `parser`/`args` are referenced -- restore real names for this to run.
    _A : Tuple = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 202 | import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class A(BaseOutput):
    """Output of an UnCLIP scheduler step.

    Attributes:
        prev_sample: the denoised sample x_{t-1} to feed into the next step.
        pred_original_sample: the model's current prediction of x_0, when available.
    """

    # The original declared both fields under one (name-mangled) identifier,
    # collapsing the dataclass to a single field; restored to distinct names
    # and to the imported `BaseOutput` base the file provides.
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def lowerCAmelCase_ ( __A, __A=0.999, __A="cosine", ) -> Tuple:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(__A ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__A ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCAmelCase__ = []
for i in range(__A ):
UpperCAmelCase__ = i / num_diffusion_timesteps
UpperCAmelCase__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__A ) / alpha_bar_fn(__A ), __A ) )
return torch.tensor(__A, dtype=torch.floataa )
class A(SchedulerMixin, ConfigMixin):
    """UnCLIP scheduler: a modified DDPM scheduler used by the unCLIP pipelines.

    Fix: in this copy the two base classes had collapsed into one duplicate
    placeholder name (TypeError at class creation), all five methods were named
    `lowercase_` (only the last survived), and every `self.*` assignment target
    was lost while the code still read `self.betas`, `self.alphas`, etc.
    Bases and names are restored from the imports above and from the
    `self._get_variance` / `UnCLIPSchedulerOutput` references in the bodies.
    """

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ) -> None:
        """Precompute beta/alpha schedules; only the squared-cosine schedule is supported."""
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        # NOTE(review): `betas_for_alpha_bar` is defined above under a collapsed
        # name in this copy — confirm the module-level name before use.
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Identity: UnCLIP does not rescale model inputs."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        """Build a descending, linearly spaced inference-timestep schedule over the training range."""
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        """Posterior variance at step `t` (formulas (6)/(7) of https://arxiv.org/pdf/2006.11239.pdf)."""
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        """One reverse-diffusion step: predict x_0, form the posterior mean, add sampled noise."""
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        """Diffuse `original_samples` forward to the noise levels given by `timesteps`."""
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        # Broadcast per-timestep scalars over the trailing sample dimensions.
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 65 | 0 |
"""Conditional re-exports of model classes, gated on the available backends."""
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_ad import DualTransformeraDModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .ta_film_transformer import TaFilmDecoder
    from .transformer_ad import TransformeraDModel
    # Fix: this import and the one below each appeared twice verbatim; the
    # byte-identical duplicates were redundant and have been removed.
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 268 | import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class A(unittest.TestCase):
    """Multi-GPU integration tests: each test launches a helper script via `torchrun`.

    Fix: every assignment in this copy targeted a throwaway name while the code
    read `mod_file`, `self.test_file_path`, `cmd`, etc. (NameError at runtime),
    and all method names had collapsed to one identifier, hiding the tests from
    unittest discovery. Names are restored to unittest conventions.
    """

    def setUp(self):
        # Resolve the bundled accelerate test scripts relative to accelerate.test_utils.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_script(self):
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_ops(self):
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(f"""Command: {cmd}""")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # Re-run this very module under torchrun so the __main__ block below executes.
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Executed under torchrun by test_pad_across_processes: verifies that
    # Accelerator.pad_across_processes zero-pads per-rank tensors of different
    # lengths, both at the end (default) and at the front (pad_first=True).
    # Fix: every assignment targeted one throwaway name while the checks read
    # `shape`, `tensor`, `tensora`, `error_msg`, and `index` (NameError).
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""

    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 65 | 0 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def a__(tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs):
    """Compute per-example max token lengths for the train/val datasets and pickle them.

    Fix: the original signature declared `_SCREAMING_SNAKE_CASE` five times
    (SyntaxError) and the body referenced `__A`, which was never bound.

    Args:
        tokenizer_name: checkpoint passed to `AutoTokenizer.from_pretrained`.
        data_dir: dataset directory handed to `SeqaSeqDataset`.
        max_source_length / max_target_length: truncation limits for the datasets.
        consider_target: if True, record max(src_len, tgt_len) per example,
            otherwise only the source length.
        **kwargs: forwarded to `SeqaSeqDataset`.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        # Batched pass over the dataset; lengths are counts of non-pad tokens.
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
    # CLI entry point: python-fire maps command-line flags onto the function's
    # parameters. NOTE(review): `save_len_file` is not defined in this file (the
    # function above is named `a__`); as written this raises NameError — confirm
    # the intended target before running.
    fire.Fire(save_len_file)
| 153 | import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowerCAmelCase_(state_dict) -> None:
    """Strip fairseq bookkeeping keys from `state_dict` in place.

    Fix: the body referenced `state_dict` and `ignore_keys` while the parameter
    was `__A` and the list was bound to a throwaway name (NameError), and the
    missing-key default for `pop` had been clobbered.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        # Use a default so absent keys are silently skipped.
        state_dict.pop(k, None)
def lowerCAmelCase_(emb):
    """Build a bias-free Linear layer that shares its weights with `emb`.

    Fix: the body referenced `emb` while the parameter was `__A`, and the
    Linear's constructor arguments / weight assignment had lost their targets.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share the embedding matrix directly (no copy).
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def lowerCAmelCase_(checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False):
    """Load a fairseq mBART checkpoint and return an equivalent HF MBart model.

    Fix: the original signature declared `__A` four times (SyntaxError) and
    several assignments had lost their targets; the restored targets
    (`mbart_config.activation_function`, `state_dict["shared.weight"]`,
    `model.lm_head`) follow the surrounding reads in this function.
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_aa and finetuned:
        mbart_config.activation_function = "relu"
    # The shared embedding is tied to the decoder embedding in the checkpoint.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    # CLI entry point for the mBART conversion.
    # Fixes: the parser/args were assigned to throwaway names while the code read
    # `parser`/`args` (NameError), and the code read `args.mbart_aa` although the
    # flag registered is `--mbart_50` (attribute `mbart_50`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 65 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
# Fix: both module-level constants were bound to a throwaway name while the code
# below read `_has_safs` and `COMPRESSION_FILESYSTEMS` (NameError at import).
_has_safs = importlib.util.find_spec("s3fs") is not None

if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401

# Filesystem classes that expose transparently decompressed views of archives.
COMPRESSION_FILESYSTEMS = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase_(dataset_path: str) -> str:
    """Strip a leading `protocol://` prefix from a dataset path, if present.

    Fix: the body referenced `dataset_path` while the parameter was named `A`.
    """
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://')[1]
    return dataset_path
def lowercase_(fs) -> bool:
    """Return True when `fs` is a non-local (remote) filesystem instance.

    Fix: the body referenced `fs` while the parameter was named `A`.
    """
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def lowercase_(fs, src, dst) -> None:
    """Move `src` to `dst` on filesystem `fs`, recursively for remote stores.

    Fix: the original signature declared the parameter `A` three times
    (SyntaxError); the body's reads fix the intended names.
    """
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def lowercase_() -> None:
    """Reset fsspec's async machinery (needed after fork in worker processes).

    Fix: the three fallback assignments had lost their targets; they are restored
    to the fsspec.asyn module state that `reset_lock` resets on newer fsspec.
    """
    if hasattr(fsspec.asyn, 'reset_lock'):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 277 | from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# Fix: the list was bound to a throwaway name while the loop below read
# `pkgs_to_check_at_runtime` (NameError at import time).
pkgs_to_check_at_runtime = [
    'python',
    'tqdm',
    'regex',
    'requests',
    'packaging',
    'filelock',
    'numpy',
    'tokenizers',
    'huggingface-hub',
    'safetensors',
    'accelerate',
    'pyyaml',
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowerCAmelCase_(pkg, hint=None) -> None:
    """Check the installed version of `pkg` against the pinned requirement.

    Fix: the original signature declared `__A` twice (SyntaxError); `hint` is an
    optional message forwarded to `require_version` on failure.
    """
    require_version(deps[pkg], hint)
| 65 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __lowercase ( a__ , a__=False ) -> Any:
try:
__SCREAMING_SNAKE_CASE = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__SCREAMING_SNAKE_CASE = default
else:
# KEY is set, convert it to True or False.
try:
__SCREAMING_SNAKE_CASE = strtobool(__A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""" )
return _value
# Fix: these module constants were bound to throwaway names while the decorator
# functions below read `_run_slow_tests`, `_run_remote_tests`, `_run_local_tests`
# and `_run_packaged_tests` (NameError). The pytest mark names are restored in
# the file's naming style. NOTE(review): `parse_flag_from_env` is defined above
# under a collapsed name in this copy — confirm the module-level name.
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)

# Compression
require_lza = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
require_pyazr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
    reason='''test requires apache-beam and a compatible dill version''',
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('''0.3.2'''),
    reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == '''win32''',
    reason='''test should not be run on Windows''',
)
def __lowercase ( a__ ) -> Any:
try:
import faiss # noqa
except ImportError:
__SCREAMING_SNAKE_CASE = unittest.skip('test requires faiss' )(__A )
return test_case
def __lowercase ( a__ ) -> Optional[Any]:
try:
import regex # noqa
except ImportError:
__SCREAMING_SNAKE_CASE = unittest.skip('test requires regex' )(__A )
return test_case
def __lowercase ( a__ ) -> List[str]:
try:
import elasticsearch # noqa
except ImportError:
__SCREAMING_SNAKE_CASE = unittest.skip('test requires elasticsearch' )(__A )
return test_case
def __lowercase ( a__ ) -> List[Any]:
try:
import sqlalchemy # noqa
except ImportError:
__SCREAMING_SNAKE_CASE = unittest.skip('test requires sqlalchemy' )(__A )
return test_case
def __lowercase ( a__ ) -> List[str]:
if not config.TORCH_AVAILABLE:
__SCREAMING_SNAKE_CASE = unittest.skip('test requires PyTorch' )(__A )
return test_case
def __lowercase ( a__ ) -> Union[str, Any]:
if not config.TF_AVAILABLE:
__SCREAMING_SNAKE_CASE = unittest.skip('test requires TensorFlow' )(__A )
return test_case
def __lowercase ( a__ ) -> Any:
if not config.JAX_AVAILABLE:
__SCREAMING_SNAKE_CASE = unittest.skip('test requires JAX' )(__A )
return test_case
def __lowercase ( a__ ) -> int:
if not config.PIL_AVAILABLE:
__SCREAMING_SNAKE_CASE = unittest.skip('test requires Pillow' )(__A )
return test_case
def __lowercase ( a__ ) -> Tuple:
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__A )
else:
return test_case
def __lowercase ( a__ ) -> Dict:
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__A )
else:
return test_case
def __lowercase ( a__ ) -> Optional[Any]:
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__A )
else:
return test_case
def __lowercase ( a__ ) -> Optional[int]:
def _require_spacy_model(a__ ):
try:
import spacy # noqa F401
spacy.load(__A )
except ImportError:
return unittest.skip('test requires spacy' )(__A )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(__A ) )(__A )
else:
return test_case
return _require_spacy_model
def __lowercase ( a__ ) -> Optional[Any]:
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(__A )
else:
return test_case
def __lowercase ( a__ ) -> Tuple:
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(__A )
else:
return test_case
def __lowercase ( a__ ) -> Optional[int]:
if not _run_slow_tests or _run_slow_tests == 0:
__SCREAMING_SNAKE_CASE = unittest.skip('test is slow' )(__A )
return test_case
def __lowercase ( a__ ) -> List[Any]:
if not _run_local_tests or _run_local_tests == 0:
__SCREAMING_SNAKE_CASE = unittest.skip('test is local' )(__A )
return test_case
def __lowercase ( a__ ) -> Optional[Any]:
if not _run_packaged_tests or _run_packaged_tests == 0:
__SCREAMING_SNAKE_CASE = unittest.skip('test is packaged' )(__A )
return test_case
def __lowercase ( a__ ) -> Any:
if not _run_remote_tests or _run_remote_tests == 0:
__SCREAMING_SNAKE_CASE = unittest.skip('test requires remote' )(__A )
return test_case
def __lowercase ( *a__ ) -> Optional[int]:
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(__A ) and name.startswith('test' ):
for decorator in decorators:
__SCREAMING_SNAKE_CASE = decorator(__A )
setattr(cls , __A , __A )
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
    """Raised when an offline-simulated request has no timeout and would hang forever.

    Fix: the class name had collapsed to a placeholder while the `offline`
    context manager below raises `RequestWouldHangIndefinitelyError`, and the
    base had collapsed into the class's own name (NameError at definition).
    """

    pass


class OfflineSimulationMode(Enum):
    """How the `offline` context manager simulates being offline.

    Member names are grounded by the dispatch below
    (`OfflineSimulationMode.CONNECTION_FAILS`, `.CONNECTION_TIMES_OUT`,
    `.HF_DATASETS_OFFLINE_SET_TO_1`).
    """

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def __lowercase(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1E-16):
    """Simulate offline network conditions for the duration of the `with` block.

    Fix: the original signature declared `a__` twice (SyntaxError) and the inner
    helpers' parameters/locals had collapsed; names are restored from the reads
    (`url`, `kwargs`, `max_retry_error`, ...).
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout."""
            )
        kwargs['timeout'] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', f"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        # NOTE(review): the patched-in value had been clobbered in this copy;
        # True matches the "offline set to 1" semantics of the mode name.
        with patch('datasets.config.HF_DATASETS_OFFLINE', True):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')
@contextmanager
def __lowercase ( *a__ , **a__ ) -> str:
__SCREAMING_SNAKE_CASE = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__A , **__A ) as tmp_dir:
try:
os.chdir(__A )
yield
finally:
os.chdir(__A )
@contextmanager
def __lowercase():
    """Assert that pyarrow allocates memory inside the `with` body.

    Fix: the baseline was bound to a throwaway name while the final assert read
    `previous_allocated_memory` (NameError).
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def __lowercase():
    """Assert that pyarrow allocates no additional memory inside the `with` body.

    Same NameError fix as the increasing variant above.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __lowercase ( a__ , a__ ) -> List[str]:
return deepcopy(__A ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__A ).integers(0 , 1_00 , 10 ).tolist()
def __lowercase(func):
    """Wrap `func` so HTTP 500/502 errors become pytest xfails instead of failures.

    Fix: the inner wrapper declared `a__` three times (SyntaxError) and the
    body's names had collapsed; restored from the reads (`func`, `err`).
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('500') or str(err).startswith('502'):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class UpperCAmelCase_:
    """Plain result record for a finished subprocess: returncode, stdout, stderr.

    Fix: `__init__` declared `_A` three times (SyntaxError) and the attribute
    assignments had lost their targets; names restored from the reads below
    (`result.returncode`, `result.stdout`, `result.stderr`).
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def __lowercase ( a__ , a__ ) -> Optional[int]:
while True:
__SCREAMING_SNAKE_CASE = await stream.readline()
if line:
callback(__A )
else:
break
async def __lowercase(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run `cmd` asynchronously, teeing stdout/stderr line-by-line, and return a _RunOutput.

    Fix: the original signature declared `a__` six times (SyntaxError) and the
    tee helper's parameters had collapsed; names restored from the reads.
    """
    if echo:
        print('\nRunning: ', ' '.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:')),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def __lowercase(cmd, env=None, stdin=None, timeout=1_80, quiet=False, echo=True):
    """Run `cmd` via the async streamer and raise on failure or empty output.

    Fix: the original signature declared `a__` six times (SyntaxError) and the
    locals had collapsed; names restored from the reads (`result`, `cmd_str`).
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}"""
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"""'{cmd_str}' produced no output.""")

    return result
def __lowercase ( ) -> Tuple:
__SCREAMING_SNAKE_CASE = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__SCREAMING_SNAKE_CASE = re.sub(R'^gw' , '' , __A , 0 , re.M )
return int(__A )
def __lowercase() -> int:
    """Return a torch.distributed port unique to this pytest-xdist worker.

    Fix: the worker offset was bound to a throwaway name while the return read
    `uniq_delta` (NameError). NOTE(review): `pytest_xdist_worker_id` is defined
    above under a collapsed name in this copy — confirm the module-level name.
    """
    port = 2_95_00
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 257 | import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCamelCase__ = logging.getLogger(__name__)
def main() -> None:
    """Tokenize a raw text dump once and pickle the resulting token-id arrays.

    Reads ``--file_path`` line by line, wraps each line with the tokenizer's
    BOS/EOS special tokens, encodes it, and dumps the shuffled list of numpy
    arrays to ``<dump_file>.<tokenizer_name>.pickle``.

    Renamed from the scrambled ``lowerCAmelCase_`` to match the
    ``if __name__ == "__main__": main()`` entry point below; all local names
    are restored from their use sites (``args``, ``bos``, ``sep``, ``data``,
    ``rslt`` ... were referenced but never bound).
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).")
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"""Loading Tokenizer ({args.tokenizer_name})""")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"""Loading text from {args.file_path}""")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"""{len(data)} examples to process.""")

    rslt = []
    n_processed = 0
    interval = 10_000  # log progress every `interval` examples
    start = time.time()
    for text in data:
        # Special tokens are added manually, so disable the tokenizer's own.
        text = f"""{bos} {text.strip()} {sep}"""
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        n_processed += 1
        if n_processed % interval == 0:
            end = time.time()
            logger.info(f"""{n_processed} examples processed. - {(end-start):.2f}s/{interval}expl""")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"""{len(data)} examples processed.""")

    dp_file = f"""{args.dump_file}.{args.tokenizer_name}.pickle"""
    vocab_size = tokenizer.vocab_size
    # uint16 is enough whenever every token id fits in 16 bits.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"""Dump to {dp_file}""")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
# Script entry point: run the preprocessing when invoked directly.
if __name__ == "__main__":
    main()
| 65 | 0 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# Which seq2seq model powers answer generation, and whether the dense
# (RETRIBERT + faiss) retrieval index is loaded at startup.  Names restored
# from the use sites below (both constants were bound to the same scrambled
# placeholder, leaving `MODEL_TYPE` / `LOAD_DENSE_INDEX` undefined).
MODEL_TYPE = """bart"""
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    """Load the (optional) dense-retrieval encoder and the seq2seq answer model.

    Returns a 4-tuple ``(qar_tokenizer, qar_model, sas_tokenizer, sas_model)``;
    the retriever pair is ``(None, None)`` when ``LOAD_DENSE_INDEX`` is False.
    Name restored from the ``load_models()`` call at module level.
    """
    if LOAD_DENSE_INDEX:
        # Dense question encoder (RETRIBERT), kept on GPU in eval mode.
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        # Fine-tuned ELI5 weights are loaded on top of the hub checkpoint.
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0")
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load wiki40b passages plus their dense faiss index and an ES client.

    Returns ``(wikiaab_passages, wikiaab_gpu_index_flat, es_client)``; the
    first two are ``None`` when ``LOAD_DENSE_INDEX`` is False.  Local names
    are restored from their use sites (``wikiaab_passages.num_rows`` and
    ``wikiaab_gpu_index_flat.add`` were referenced but never bound).
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        # Pre-computed 128-d passage embeddings, memory-mapped from disk.
        wikiaab_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r",
            shape=(wikiaab_passages.num_rows, 128), )
        wikiaab_index_flat = faiss.IndexFlatIP(128)
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat)
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps)  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and a faiss index over its question embeddings.

    Returns ``(elia_train, eli5_train_q_index)``.  Names restored from the
    use sites (``elia["train_eli5"]`` and the return statement referenced
    names that were never bound).
    """
    elia = datasets.load_dataset("eli5", name="LFQA_reddit")
    elia_train = elia["train_eli5"]
    # Pre-computed 128-d question embeddings, memory-mapped from disk.
    elia_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(elia_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(elia_train_q_reps)
    return (elia_train, eli5_train_q_index)
# Materialize the cached resources once at module import time.  The unpacked
# names are restored from the loader return statements and from their use
# sites further down (eli5_train_q_index.search, elia_train[...], sas_model,
# sas_tokenizer, ...); the originals were all bound to a scrambled placeholder.
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """Return the `n_results` ELI5 training examples closest to `question`.

    Embeds the question with the dense retriever and searches the training
    question index.  Name restored from the ``find_nearest_training(question)``
    call in the UI code below.
    """
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [elia_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    """Retrieve support passages for `question` and build the seq2seq input.

    Returns ``(question_doc, support_list)`` where ``question_doc`` is the
    "question: ... context: ..." string fed to the generator and
    ``support_list`` holds ``(article_title, section_title, score, passage)``
    tuples.  Name restored from the ``make_support(question, ...)`` calls in
    the UI code below.
    """
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            # TODO(review): argument order inferred from the retriever's
            # resources — confirm against elia_utils.query_qa_dense_index.
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wikiaab_passages, wikiaab_gpu_index_flat, n_results)
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results, )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    } )
def answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    """Generate an answer for `question_doc` with the seq2seq model.

    Parameter names restored from the keyword call site below
    (``answer_question(question_doc, sas_model, sas_tokenizer, min_len=...,
    max_len=..., sampling=..., n_beams=..., top_p=..., temp=...)``).
    """
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams,
            min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p,
            top_k=None, max_input_length=1024, device="cuda:0", )[0]
    # NOTE(review): `support_list` is not defined in this function — it reads
    # the module-level global set by the UI code; confirm this is intended.
    return (answer, support_list)
# ---------------------------------------------------------------------------
# Streamlit UI for the ELI5 long-form QA demo.
# NOTE(review): every `__snake_case = ...` assignment below appears
# machine-scrambled — the intended variable name can be recovered from the
# later reference (e.g. the first two are `header_html` and `header_full`,
# used at the `% (header_html,)` interpolation and `st.sidebar.markdown`
# call).  As written, names such as `action_list`, `wiki_source`,
# `index_type`, `question`, `support_list` are read but never bound.
# ---------------------------------------------------------------------------
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
__snake_case = """<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>"""
__snake_case = """\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__snake_case = """\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"""
st.sidebar.markdown(description, unsafe_allow_html=True)
# Sidebar: demo-mode selection (what to display for a query).
__snake_case = [
    """Answer the question""",
    """View the retrieved document only""",
    """View the most similar ELI5 question and answer""",
    """Show me everything, please!""",
]
__snake_case = st.sidebar.checkbox("""Demo options""")
if demo_options:
    __snake_case = st.sidebar.selectbox(
        """""",
        action_list,
        index=3,
    )
    __snake_case = action_list.index(action_st)
    __snake_case = st.sidebar.selectbox(
        """""",
        ["""Show full text of passages""", """Show passage section titles"""],
        index=0,
    )
    __snake_case = show_type == """Show full text of passages"""
else:
    __snake_case = 3
    __snake_case = True
# Sidebar: retrieval configuration (corpus + sparse/dense/mixed indexer).
__snake_case = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
    __snake_case = """\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n    """
    st.sidebar.markdown(retriever_info)
    __snake_case = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
    __snake_case = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
    __snake_case = """wiki40b"""
    __snake_case = """dense"""
# Sidebar: generation configuration (beam search vs. sampling + lengths).
__snake_case = """beam"""
__snake_case = 2
__snake_case = 64
__snake_case = 256
__snake_case = None
__snake_case = None
__snake_case = st.sidebar.checkbox("""Generation options""")
if generate_options:
    __snake_case = """\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder\'s output probabilities.\n    """
    st.sidebar.markdown(generate_info)
    __snake_case = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
    __snake_case = st.sidebar.slider(
        """Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    __snake_case = st.sidebar.slider(
        """Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        __snake_case = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        __snake_case = st.sidebar.slider(
            """Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        __snake_case = st.sidebar.slider(
            """Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        __snake_case = None
# start main text
__snake_case = [
    """<MY QUESTION>""",
    """How do people make chocolate?""",
    """Why do we get a fever when we are sick?""",
    """How can different animals perceive different colors?""",
    """What is natural language processing?""",
    """What\'s the best way to treat a sunburn?""",
    """What exactly are vitamins ?""",
    """How does nuclear energy provide electricity?""",
    """What\'s the difference between viruses and bacteria?""",
    """Why are flutes classified as woodwinds when most of them are made out of metal ?""",
    """Why do people like drinking coffee even though it tastes so bad?""",
    """What happens when wine ages? How does it make the wine taste better?""",
    """If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
    """How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?""",
    """How does New Zealand have so many large bird predators?""",
]
__snake_case = st.selectbox(
    """What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    __snake_case = st.text_input("""Enter your question here:""", """""")
else:
    __snake_case = question_s
# Main action: retrieve support passages, generate the answer, and render
# retrieved passages / nearest training examples depending on `action`.
if st.button("""Show me!"""):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Mixed retrieval: merge dense and sparse hits, de-duplicated,
            # keeping the top 10.
            __snake_case , __snake_case = make_support(question, source=wiki_source, method="""dense""", n_results=10)
            __snake_case , __snake_case = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
            __snake_case = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            __snake_case = support_list[:10]
            __snake_case = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
        else:
            __snake_case , __snake_case = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        __snake_case , __snake_case = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == """sampled"""),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("""### The model generated answer is:""")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
        for i, res in enumerate(support_list):
            __snake_case = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
            __snake_case = res[1].strip()
            if sec_titles == "":
                __snake_case = """[{}]({})""".format(res[0], wiki_url)
            else:
                __snake_case = sec_titles.split(""" & """)
                __snake_case = """ & """.join(
                    ["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
                )
            st.markdown(
                """{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}""".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    """> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
                )
    if action in [2, 3]:
        __snake_case = find_nearest_training(question)
        __snake_case = nn_train_list[0]
        st.markdown(
            """--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
        )
        __snake_case = [
            """{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
            for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
            if i == 0 or sc > 2
        ]
        st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
__snake_case = """\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 203 | from manim import *
class A ( UpperCAmelCase_ ):
    # NOTE(review): the base class name `UpperCAmelCase_` is not defined in
    # this file — given the manim API used below, it is presumably
    # `manim.Scene`; confirm against the original source.
    def lowercase_ (self : Union[str, Any] ) -> List[str]:
        """Animate disk-offloaded inference: an input token travels through the
        model's layers while each layer's weights are shuttled CPU -> GPU and
        back, illustrating how hooks page weights in and out of GPU memory."""
        # Building blocks: a memory cell, its filled variant, and a smaller
        # "meta" cell used for the disk representation.
        UpperCAmelCase__ = Rectangle(height=0.5 , width=0.5 )
        UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        UpperCAmelCase__ = Rectangle(height=0.25 , width=0.25 )
        # CPU: two columns of six memory cells with a label.
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("CPU" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__UpperCAmelCase )
        # GPU: a row of four memory cells with a label.
        UpperCAmelCase__ = [mem.copy() for i in range(4 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("GPU" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(__UpperCAmelCase )
        # Model: a row of six memory cells representing its layers.
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("Model" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(__UpperCAmelCase )
        # For every model layer, create a filled target in the model row and a
        # matching weight block parked in the CPU column.
        UpperCAmelCase__ = []
        UpperCAmelCase__ = []
        for i, rect in enumerate(__UpperCAmelCase ):
            UpperCAmelCase__ = fill.copy().set_fill(__UpperCAmelCase , opacity=0.8 )
            target.move_to(__UpperCAmelCase )
            model_arr.append(__UpperCAmelCase )
            UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase )
        # Disk: two columns of six meta cells with a label.
        UpperCAmelCase__ = [meta_mem.copy() for i in range(6 )]
        UpperCAmelCase__ = [meta_mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("Disk" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        disk.move_to([-4, -1.25, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        # Legend: key square plus colored captions.
        UpperCAmelCase__ = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        UpperCAmelCase__ = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase__ = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__UpperCAmelCase )
        UpperCAmelCase__ = MarkupText(
            f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase ) )
        # The input token enters the first layer.
        UpperCAmelCase__ = Square(0.3 )
        input.set_fill(__UpperCAmelCase , opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] , __UpperCAmelCase , buff=0.5 )
        self.play(Write(__UpperCAmelCase ) )
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=__UpperCAmelCase , buff=0.02 )
        self.play(MoveToTarget(__UpperCAmelCase ) )
        self.play(FadeOut(__UpperCAmelCase ) )
        UpperCAmelCase__ = Arrow(start=__UpperCAmelCase , end=__UpperCAmelCase , color=__UpperCAmelCase , buff=0.5 )
        a.next_to(model_arr[0].get_left() , __UpperCAmelCase , buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        UpperCAmelCase__ = MarkupText(
            f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) )
        UpperCAmelCase__ = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(__UpperCAmelCase ) , Circumscribe(model_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        UpperCAmelCase__ = a.copy()
        # Walk the input through each layer: weights for the current layer are
        # returned to the CPU while the next layer's weights are staged on GPU.
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 , __UpperCAmelCase , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            UpperCAmelCase__ = AnimationGroup(
                FadeOut(__UpperCAmelCase , run_time=0.5 ) , MoveToTarget(__UpperCAmelCase , run_time=0.5 ) , FadeIn(__UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
            self.play(__UpperCAmelCase )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    UpperCAmelCase__ = 0.7
                self.play(
                    Circumscribe(model_arr[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
            else:
                # Last layer: its weights return to the CPU and the input exits.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        UpperCAmelCase__ = a_c
        UpperCAmelCase__ = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
        self.play(
            FadeOut(__UpperCAmelCase ) , FadeOut(__UpperCAmelCase , run_time=0.5 ) , )
        UpperCAmelCase__ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) , MoveToTarget(__UpperCAmelCase ) )
        self.wait()
| 65 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Output of RobertaSeriesModelWithTransformation.

    Class and field names are restored from the keyword-argument construction
    in the model's forward pass below (the scrambled version declared four
    dataclass fields under a single name, so only the last one survived, and
    the class itself was referenced as ``TransformationModelOutput`` but
    never defined).
    """

    # Hidden states projected into the shared text-embedding space.
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    """XLM-Roberta configuration extended with text-projection options.

    Renamed from the scrambled ``_UpperCAmelCase``: the model class below
    references ``RobertaSeriesConfig`` as its ``config_class``.
    """

    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim  # width of the projection head
        self.pooler_fn = pooler_fn  # name of the pooling strategy
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    """XLMRoberta encoder with a linear projection head on its hidden states.

    All self-attribute names are restored from their use sites in ``forward``
    (``self.pre_LN``, ``self.transformation_pre``, ``self.transformation``,
    ``self.has_pre_transformation``); the scrambled version assigned them to
    throw-away locals, so the module had no parameters at all.
    """

    _keys_to_ignore_on_load_unexpected = [r'pooler', r'logit_scale']
    _keys_to_ignore_on_load_missing = [r'position_ids', r'predictions.decoder.bias']
    base_model_prefix = 'roberta'
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        # `base_model_prefix` is "roberta", so this attribute backs
        # `self.base_model` used in forward().
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        # Some checkpoints project from the penultimate layer after a LayerNorm.
        self.has_pre_transformation = getattr(config, '''has_pre_transformation''', False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Encode the input and return a TransformationModelOutput."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            # The penultimate layer is only reachable via hidden_states, so
            # force them on when the pre-transformation path is active.
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output2 = outputs['''hidden_states'''][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)
            return TransformationModelOutput(
                projection_state=projection_state2,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
| 264 | from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """A 2-D Bezier curve defined by a list of (x, y) control points.

    Renamed from the scrambled ``A``: the demo code below instantiates
    ``BezierCurve`` and calls ``plot_curve``; the method names are restored
    from the internal calls (``self.basis_function``,
    ``self.bezier_curve_function``) — the scrambled version named every
    method ``lowercase_``, so those calls raised AttributeError.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        """list_of_points: control points that shape the curve."""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Return the Bernstein basis weights of each control point at time t.

        The weights always sum to 1 (asserted below), which is what makes the
        weighted sum a valid Bezier curve.
        """
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Return the (x, y) coordinates of the curve at time t in [0, 1]."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plot the curve (sampled every `step_size` in t) and its control points."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree), )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
# Demo: run doctests, then draw curves of degree 1, 2 and 3 (each call opens
# a matplotlib window).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 65 | 0 |
from __future__ import annotations
from collections import namedtuple
def a__(voltage: float, current: float, power: float) -> tuple:
    """Solve the electrical power equation P = V * I for the missing quantity.

    Exactly one of the three arguments must be 0; that is the quantity to
    compute.  Parameter names are restored from the body (the scrambled
    version declared all three parameters under one name — a SyntaxError).

    Returns:
        A ``result(name, value)`` namedtuple, e.g. ``result('power', 6.0)``.

    Raises:
        ValueError: if not exactly one argument is 0, or if power is negative.
    """
    result = namedtuple('''result''', '''name value''')
    if (voltage, current, power).count(0) != 1:
        raise ValueError('''Only one argument must be 0''')
    elif power < 0:
        raise ValueError(
            '''Power cannot be negative in any electrical/electronics system''')
    elif voltage == 0:
        return result('''voltage''', power / current)
    elif current == 0:
        return result('''current''', power / voltage)
    elif power == 0:
        # abs() + round() keep the reported power positive with 2 decimals.
        return result('''power''', float(round(abs(voltage * current), 2)))
    else:
        raise ValueError('''Exactly one argument must be 0''')
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 336 | import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), 'Tatoeba directory does not exist.')
class A(unittest.TestCase):
    """Slow integration tests for TatoebaConverter.

    The skip decorator now checks the imported ``DEFAULT_REPO`` path (the
    scrambled version referenced an undefined name).  Test-method names get
    the ``test_`` prefix required for unittest discovery.
    """

    @cached_property
    def resolver(self):
        """A TatoebaConverter writing into a fresh temporary directory."""
        # Named `resolver` to back the `self.resolver` accesses below.
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        """End-to-end conversion of the heb-eng model."""
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        """Dry-run model-card generation reports the right language pair."""
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 65 | 0 |
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)
# The UnCLIP pipelines need torch and transformers >= 4.25.0; fall back to
# dummy placeholder objects (which raise a helpful error on use) otherwise.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Demo of fuzzy-set operations on two triangular membership functions.
    # Variable names are restored from their use sites below (the scrambled
    # version bound every value to the same placeholder, so `X`, `young`,
    # `middle_aged`, ... were undefined).
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 2_5, 5_0]
    abc2 = [2_5, 5_0, 7_5]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(7_5)
    zero = np.zeros((7_5,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)

    plt.subplot(4, 3, 1_0)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 65 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCamelCase(GLPNImageProcessor):
    """Deprecated alias of ``GLPNImageProcessor`` kept for backward compatibility.

    Constructing it behaves exactly like ``GLPNImageProcessor`` but emits a
    ``FutureWarning`` first.
    NOTE(review): the class name looks mangled — upstream this shim is named
    ``GLPNFeatureExtractor`` (the name mentioned in the warning text); confirm.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Original code used duplicate star-parameter names (a SyntaxError) and
        # passed an undefined placeholder as the warning category; restored to
        # the standard deprecation pattern.
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 294 | from __future__ import annotations
from collections import deque
class A:
    """Aho-Corasick automaton: builds a keyword trie with failure links and
    finds every occurrence of every keyword in a text in a single pass.

    NOTE(review): the original had all four methods mangled to the same name
    ``lowercase_`` and duplicated parameter names (a SyntaxError); the method
    names below are restored from the calls made in ``__init__`` and the
    method bodies themselves.
    """

    def __init__(self, keywords: list[str]) -> None:
        """Build the trie from *keywords* and compute failure transitions."""
        # Node 0 is the root. Each node records its character value, its
        # child state ids, its failure-link state and the keywords matched
        # when the automaton reaches it.
        self.adlist: list[dict] = [
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        ]
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        """Return the child of *current_state* labelled *char*, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        """Insert *keyword* into the trie, creating nodes as needed."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        # The keyword ends here; record it in this node's output set.
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        """BFS over the trie to compute failure links and merge output lists."""
        q: deque[int] = deque()
        # Depth-1 nodes fail back to the root (the default fail_state of 0).
        for node in self.adlist[0]["next_states"]:
            q.append(node)
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                # Follow the parent's failure chain until a state with a
                # matching child (or the root) is found.
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                fail = self.find_next_state(state, self.adlist[child]["value"])
                self.adlist[child]["fail_state"] = 0 if fail is None else fail
                # Matches reachable through the failure link are matches here too.
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return ``{keyword: [start indices]}`` for every match in *string*."""
        result: dict[str, list[int]] = {}
        current_state = 0
        for i in range(len(string)):
            # Fall back along failure links until the character can be consumed.
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            current_state = 0 if next_state is None else next_state
            for key in self.adlist[current_state]["output"]:
                result.setdefault(key, []).append(i - len(key) + 1)
        return result
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 65 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Pin RNG/backend behaviour so the hard-coded image-slice assertions below are reproducible.
enable_full_determinism()
class lowercase( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Fast, CPU-sized smoke tests for ``CycleDiffusionPipeline``.

    NOTE(review): identifiers in this class look machine-mangled — every class
    attribute is assigned to the same name, every method is named
    ``UpperCamelCase_``, and the placeholder ``__UpperCAmelCase`` stands in for
    several distinct originals (e.g. False, torch_device, local temporaries).
    The duplicated base-class name is rejected at class-creation time;
    presumably the bases are PipelineLatentTesterMixin and PipelineTesterMixin
    (imported above) — confirm against the original test module.
    """

    # pipeline under test, plus the parameter sets exercised by the shared mixins
    lowercase__ = CycleDiffusionPipeline
    lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'negative_prompt',
        'height',
        'width',
        'negative_prompt_embeds',
    }
    lowercase__ = PipelineTesterMixin.required_optional_params - {'latents'}
    lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
    lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def UpperCamelCase_ ( self: str ):
        """Build a tiny UNet/scheduler/VAE/CLIP component stack for fast tests."""
        torch.manual_seed(0 )
        _snake_case : Optional[int] = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), cross_attention_dim=32, )
        _snake_case : Optional[Any] = DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule="""scaled_linear""", num_train_timesteps=1_000, clip_sample=__UpperCAmelCase, set_alpha_to_one=__UpperCAmelCase, )
        torch.manual_seed(0 )
        _snake_case : Any = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, )
        torch.manual_seed(0 )
        _snake_case : Tuple = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        _snake_case : Union[str, Any] = CLIPTextModel(__UpperCAmelCase )
        _snake_case : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        _snake_case : Optional[Any] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def UpperCamelCase_ ( self: str, a_: Union[str, Any], a_: List[Any]=0 ):
        """Return deterministic call kwargs: a seeded dummy image, the prompt
        pair and a device-appropriate generator."""
        _snake_case : Optional[int] = floats_tensor((1, 3, 32, 32), rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
        # map the random tensor from [-1, 1]-ish range into [0, 1]
        _snake_case : Any = image / 2 + 0.5
        if str(__UpperCAmelCase ).startswith("""mps""" ):
            _snake_case : List[Any] = torch.manual_seed(__UpperCAmelCase )
        else:
            _snake_case : Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        _snake_case : List[str] = {
            """prompt""": """An astronaut riding an elephant""",
            """source_prompt""": """An astronaut riding a horse""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """eta""": 0.1,
            """strength""": 0.8,
            """guidance_scale""": 3,
            """source_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs

    def UpperCamelCase_ ( self: List[Any] ):
        """End-to-end CPU run; checks output shape and a reference image slice."""
        _snake_case : List[str] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        _snake_case : Optional[Any] = self.get_dummy_components()
        _snake_case : Optional[int] = CycleDiffusionPipeline(**__UpperCAmelCase )
        _snake_case : Optional[Any] = pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        _snake_case : Union[str, Any] = self.get_dummy_inputs(__UpperCAmelCase )
        _snake_case : Tuple = pipe(**__UpperCAmelCase )
        _snake_case : Optional[int] = output.images
        _snake_case : Dict = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        # reference slice recorded from a known-good run
        _snake_case : int = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    @unittest.skipIf(torch_device != """cuda""", """This test requires a GPU""" )
    def UpperCamelCase_ ( self: int ):
        """Same end-to-end run with all modules cast to float16 on GPU."""
        _snake_case : Optional[Any] = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(__UpperCAmelCase, """half""" ):
                _snake_case : Optional[Any] = module.half()
        _snake_case : str = CycleDiffusionPipeline(**__UpperCAmelCase )
        _snake_case : List[Any] = pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        _snake_case : Tuple = self.get_dummy_inputs(__UpperCAmelCase )
        _snake_case : Optional[Any] = pipe(**__UpperCAmelCase )
        _snake_case : List[Any] = output.images
        _snake_case : Optional[int] = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        _snake_case : Optional[Any] = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    @skip_mps
    def UpperCamelCase_ ( self: Optional[int] ):
        """Delegate to the shared mixin test (not supported on MPS)."""
        return super().test_save_load_local()

    @unittest.skip("""non-deterministic pipeline""" )
    def UpperCamelCase_ ( self: Any ):
        """Skipped: batched vs. single inference is not bit-identical here."""
        return super().test_inference_batch_single_identical()

    @skip_mps
    def UpperCamelCase_ ( self: List[str] ):
        """Delegate to the shared mixin test (not supported on MPS)."""
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def UpperCamelCase_ ( self: Optional[int] ):
        """Delegate to the shared mixin test (not supported on MPS)."""
        return super().test_save_load_optional_components()

    @skip_mps
    def UpperCamelCase_ ( self: Union[str, Any] ):
        """Delegate to the shared mixin test (not supported on MPS)."""
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class lowercase( unittest.TestCase ):
    """Slow GPU integration tests that compare CycleDiffusion outputs against
    recorded reference images.

    NOTE(review): ``__UpperCAmelCase`` below is a mangled placeholder that
    replaced several distinct originals (model id, scheduler, None,
    torch_device, prompts, generator, ...) — restore from the upstream test
    module before relying on this file.
    """

    def UpperCamelCase_ ( self: Dict ):
        """Free GPU memory between tests (tearDown-style hook)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase_ ( self: Union[str, Any] ):
        """fp16 end-to-end run: recolor a black car to blue, compare to the
        recorded fp16 reference with a loose tolerance."""
        _snake_case : str = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""" )
        _snake_case : Tuple = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
        _snake_case : List[str] = init_image.resize((512, 512) )
        _snake_case : Union[str, Any] = """CompVis/stable-diffusion-v1-4"""
        _snake_case : Any = DDIMScheduler.from_pretrained(__UpperCAmelCase, subfolder="""scheduler""" )
        _snake_case : int = CycleDiffusionPipeline.from_pretrained(
            __UpperCAmelCase, scheduler=__UpperCAmelCase, safety_checker=__UpperCAmelCase, torch_dtype=torch.floataa, revision="""fp16""" )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        pipe.enable_attention_slicing()
        _snake_case : Dict = """A black colored car"""
        _snake_case : int = """A blue colored car"""
        _snake_case : str = torch.manual_seed(0 )
        _snake_case : Any = pipe(
            prompt=__UpperCAmelCase, source_prompt=__UpperCAmelCase, image=__UpperCAmelCase, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=__UpperCAmelCase, output_type="""np""", )
        _snake_case : Tuple = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5E-1

    def UpperCamelCase_ ( self: Any ):
        """Full-precision end-to-end run; tighter tolerance than the fp16 test."""
        _snake_case : Dict = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""" )
        _snake_case : int = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
        _snake_case : Union[str, Any] = init_image.resize((512, 512) )
        _snake_case : Any = """CompVis/stable-diffusion-v1-4"""
        _snake_case : Optional[Any] = DDIMScheduler.from_pretrained(__UpperCAmelCase, subfolder="""scheduler""" )
        _snake_case : Dict = CycleDiffusionPipeline.from_pretrained(__UpperCAmelCase, scheduler=__UpperCAmelCase, safety_checker=__UpperCAmelCase )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        pipe.enable_attention_slicing()
        _snake_case : Tuple = """A black colored car"""
        _snake_case : List[Any] = """A blue colored car"""
        _snake_case : Tuple = torch.manual_seed(0 )
        _snake_case : Dict = pipe(
            prompt=__UpperCAmelCase, source_prompt=__UpperCAmelCase, image=__UpperCAmelCase, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=__UpperCAmelCase, output_type="""np""", )
        _snake_case : int = output.images
        assert np.abs(image - expected_image ).max() < 2E-2
| 64 | import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class A ( UpperCAmelCase_ ):
    """Speech feature extractor: raw waveforms for model inputs and log-mel
    spectrograms for decoder targets, with padding and optional normalization.

    NOTE(review): the signatures below repeat the mangled placeholder
    ``__UpperCAmelCase`` for every parameter (duplicate argument names are a
    SyntaxError), and body references use the distinct original names
    (do_normalize, hop_length, ...). Restore the real parameter names from the
    upstream class (this matches SpeechT5FeatureExtractor's structure) before
    use; the code is kept byte-identical here.
    """

    # names of the tensors produced by __call__
    __UpperCAmelCase : int = ['input_values', 'attention_mask']

    def __init__(self : Any , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 1_6_0_0_0 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : bool = False , __UpperCAmelCase : int = 8_0 , __UpperCAmelCase : int = 1_6 , __UpperCAmelCase : int = 6_4 , __UpperCAmelCase : str = "hann_window" , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : float = 8_0 , __UpperCAmelCase : float = 7_6_0_0 , __UpperCAmelCase : float = 1E-10 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : bool = True , **__UpperCAmelCase : Any , ) -> str:
        """Store configuration and precompute the STFT window and mel filter bank."""
        super().__init__(feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , **__UpperCAmelCase )
        UpperCAmelCase__ = do_normalize
        UpperCAmelCase__ = return_attention_mask
        UpperCAmelCase__ = num_mel_bins
        UpperCAmelCase__ = hop_length
        UpperCAmelCase__ = win_length
        UpperCAmelCase__ = win_function
        UpperCAmelCase__ = frame_signal_scale
        UpperCAmelCase__ = fmin
        UpperCAmelCase__ = fmax
        UpperCAmelCase__ = mel_floor
        UpperCAmelCase__ = reduction_factor
        # window/hop sizes are given in milliseconds; convert to samples
        UpperCAmelCase__ = win_length * sampling_rate // 1_0_0_0
        UpperCAmelCase__ = hop_length * sampling_rate // 1_0_0_0
        UpperCAmelCase__ = optimal_fft_length(self.sample_size )
        UpperCAmelCase__ = (self.n_fft // 2) + 1
        UpperCAmelCase__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCAmelCase )
        UpperCAmelCase__ = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
        # both parameters are kept only for backward compatibility
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , __UpperCAmelCase , )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , __UpperCAmelCase , )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def lowercase_ (__UpperCAmelCase : List[np.ndarray] , __UpperCAmelCase : List[np.ndarray] , __UpperCAmelCase : float = 0.0 ) -> List[np.ndarray]:
        """Normalize each vector to zero mean / unit variance over its valid
        (attention-masked) length; padded positions are set to padding_value."""
        if attention_mask is not None:
            UpperCAmelCase__ = np.array(__UpperCAmelCase , np.intaa )
            UpperCAmelCase__ = []
            for vector, length in zip(__UpperCAmelCase , attention_mask.sum(-1 ) ):
                # 1e-7 guards against division by zero on silent segments
                UpperCAmelCase__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    UpperCAmelCase__ = padding_value
                normed_input_values.append(__UpperCAmelCase )
        else:
            UpperCAmelCase__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values

    def lowercase_ (self : Optional[int] , __UpperCAmelCase : np.ndarray , ) -> np.ndarray:
        """Compute a log10 mel spectrogram; transposed to (frames, num_mel_bins)."""
        UpperCAmelCase__ = spectrogram(
            __UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
        return log_mel_spec.T

    def __call__(self : Any , __UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , __UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , __UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : Optional[int] = None , **__UpperCAmelCase : str , ) -> BatchFeature:
        """Featurize model inputs (`audio`) and/or decoder targets
        (`audio_target`); when both are given, the target values are attached
        to the input batch as labels/decoder_attention_mask."""
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values." )
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        if audio is not None:
            UpperCAmelCase__ = self._process_audio(
                __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , )
        else:
            UpperCAmelCase__ = None
        if audio_target is not None:
            UpperCAmelCase__ = self._process_audio(
                __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , )
            if inputs is None:
                return inputs_target
            else:
                UpperCAmelCase__ = inputs_target["input_values"]
                UpperCAmelCase__ = inputs_target.get("attention_mask" )
                if decoder_attention_mask is not None:
                    UpperCAmelCase__ = decoder_attention_mask
        return inputs

    def lowercase_ (self : Optional[int] , __UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCAmelCase : bool = False , __UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , **__UpperCAmelCase : Any , ) -> BatchFeature:
        """Normalize input layout to a batch, optionally extract mel features
        (target mode), pad, cast dtypes and normalize."""
        UpperCAmelCase__ = isinstance(__UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        UpperCAmelCase__ = is_batched_numpy or (
            isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            UpperCAmelCase__ = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for speech in speech]
        elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ):
            UpperCAmelCase__ = np.asarray(__UpperCAmelCase , dtype=np.floataa )
        elif isinstance(__UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
            UpperCAmelCase__ = speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            UpperCAmelCase__ = [speech]
        # needed to make pad() work on spectrogram inputs
        UpperCAmelCase__ = self.feature_size
        # convert into correct format for padding
        if is_target:
            UpperCAmelCase__ = [self._extract_mel_features(__UpperCAmelCase ) for waveform in speech]
            UpperCAmelCase__ = BatchFeature({"input_values": features} )
            UpperCAmelCase__ = self.num_mel_bins
        else:
            UpperCAmelCase__ = BatchFeature({"input_values": speech} )
        UpperCAmelCase__ = self.pad(
            __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
        # restore the feature size that was temporarily overridden for pad()
        UpperCAmelCase__ = feature_size_hack
        # convert input values to correct format
        UpperCAmelCase__ = padded_inputs["input_values"]
        if not isinstance(input_values[0] , np.ndarray ):
            UpperCAmelCase__ = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for array in input_values]
        elif (
            not isinstance(__UpperCAmelCase , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.floataa )
        ):
            UpperCAmelCase__ = [array.astype(np.floataa ) for array in input_values]
        elif isinstance(__UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
            UpperCAmelCase__ = input_values.astype(np.floataa )
        # convert attention_mask to correct format
        UpperCAmelCase__ = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            UpperCAmelCase__ = [np.asarray(__UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            UpperCAmelCase__ = (
                attention_mask
                if self._get_padding_strategies(__UpperCAmelCase , max_length=__UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            UpperCAmelCase__ = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"] , attention_mask=__UpperCAmelCase , padding_value=self.padding_value )
        if return_tensors is not None:
            UpperCAmelCase__ = padded_inputs.convert_to_tensors(__UpperCAmelCase )
        return padded_inputs

    def lowercase_ (self : Tuple ) -> Dict[str, Any]:
        """Serialize the config, dropping attributes derived in __init__."""
        UpperCAmelCase__ = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        UpperCAmelCase__ = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
| 65 | 0 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
# Key-name mapping tables for UNet weights, built once at import time.
# All names restored from the append/lookup call sites below (the originals
# were mangled to the single placeholder `_A`).
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Rename the keys of a HF Diffusers UNet state dict to the original
    Stable Diffusion checkpoint layout.

    Builds an identity key mapping, overlays the direct renames from
    ``unet_conversion_map``, then applies the per-resnet and per-layer prefix
    substitutions. Tensor values are not modified.
    Name restored from the call site (``convert_unet_state_dict`` in the
    ``__main__`` block); the original definition was mangled to
    ``__magic_name__``.
    """
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
# Key-name mapping tables for VAE weights (names restored from the append
# call sites; originals were mangled to `_A`).
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    """Append two singleton dims so a (out, in) linear weight matches the
    (out, in, 1, 1) conv layout used by original SD attention weights.

    Name restored from the call site in the VAE converter; the original
    definition was mangled to ``__magic_name__``.
    """
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    """Rename a HF Diffusers VAE state dict to the original SD layout and
    reshape the mid-block attention q/k/v/proj weights to conv form.

    Name and mangled assignment targets restored from the surrounding
    conversion tables and the ``reshape_weight_for_sd`` helper.
    """
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    # SD stores the mid-block attention projections as 1x1 convs; HF stores
    # them as plain linear weights — add the trailing singleton dims.
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"""mid.attn_1.{weight_name}.weight""" in k:
                print(f"""Reshaping {k} for SD format""")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
# Text-encoder key renames plus a combined regex that applies them all in one
# pass (names restored from the lookups in the converters below).
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
# escaped HF name -> SD name, used by the substitution callbacks
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    """Convert a HF CLIP text-encoder state dict (v2-style, split q/k/v
    projections) to the original SD layout, fusing the three self-attention
    projections into single ``in_proj_weight`` / ``in_proj_bias`` tensors.

    Raises if any of the three q/k/v tensors for a layer is missing.
    NOTE(review): the function name and several mangled assignment targets
    were restored from the upstream conversion script — confirm the
    ``.in_proj_weight`` / ``.in_proj_bias`` suffixes against it.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]  # the single char 'q', 'k' or 'v'
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def __magic_name__ ( __snake_case : Tuple ) -> Tuple:
return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )
    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    # NOTE(review): `osp`, `load_file`/`save_file` and the convert_* helpers are
    # expected from earlier in this file (imports/definitions above this view).
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class A(UpperCAmelCase_):
    """Model output carrying the projected text embedding alongside the
    base model's outputs (ModelOutput-style container).

    Field names restored from the keyword construction in forward() below.
    """

    # Projected (transformed) hidden state used as the conditioning embedding.
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


# forward() constructs this class under the name `TransformationModelOutput`;
# keep that name bound so the call site resolves.
TransformationModelOutput = A
class A(UpperCAmelCase_):
    """Configuration for the RobertaSeries transformation model.

    Parameter names restored from the attribute assignments in the original
    body (the obfuscated signature had duplicate parameter names).
    """

    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim          # output width of the projection head
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


# The model class below references this config as `RobertaSeriesConfig`.
RobertaSeriesConfig = A
class A(UpperCAmelCase_):
    """XLM-RoBERTa encoder with a linear projection head producing a
    `TransformationModelOutput` (conditioning embeddings for diffusion)."""

    # Attribute names restored for the transformers weight-loading machinery.
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            # Separate head + layer norm applied to the penultimate hidden state.
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Encode the input and project it; returns TransformationModelOutput."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            # The pre-transformation path needs hidden states to read the
            # penultimate layer, so force them on in that case.
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
        return TransformationModelOutput(
            projection_state=projection_state,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
"""simple docstring"""
import math
import random
def snake_case(value, deriv=False):
    """Sigmoid activation 1/(1+e^-x).

    With ``deriv=True``, returns the derivative ``value * (1 - value)`` —
    note `value` must then already be a sigmoid output.
    (Parameter names restored: the original signature had two params both
    named ``A__``, a SyntaxError, while the body read `value`/`deriv`.)
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value: fixed scalar fed to the single neuron on every step.
# NOTE(review): the trainer below refers to `INITIAL_VALUE`; this constant's
# obfuscated name (`lowerCamelCase_`) breaks that link — confirm the binding.
lowerCamelCase_ = 0.02
def snake_case(expected, number_propagations):
    """Train a single sigmoid neuron toward `expected` (a value in 0-100)
    for `number_propagations` steps; returns the final output scaled by 100.

    Non-deterministic: the starting weight is drawn at random per call.
    NOTE(review): `sigmoid_function` / `INITIAL_VALUE` are bound under
    obfuscated names elsewhere in this file — confirm those references.
    """
    # Random starting weight in [-99, 99] in steps of 2.
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta (gradient through the sigmoid)
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bind the interactive inputs to the names the final call actually reads
    # (the obfuscated version assigned both to the same throwaway name).
    expected = int(input('''Expected value: '''))
    number_propagations = int(input('''Number of propagations: '''))
    # NOTE(review): `forward_propagation` is defined above under an obfuscated
    # name (`snake_case`) — confirm the intended target.
    print(forward_propagation(expected, number_propagations))
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class A(unittest.TestCase):
    """Single-node SageMaker training smoke test, parameterized per framework."""

    def setUp(self):
        # Restored name: unittest only runs per-test setup when it is `setUp`.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,  # was an undefined placeholder; the copy must fail loudly
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        """Build a HuggingFace SageMaker estimator for this parameterization."""
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's metric history next to the test workspace."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 65 | 0 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the set of already-emitted deprecation warnings before a test.

    The parameter must be named `monkeypatch` for pytest to inject the
    fixture (the obfuscated signature used a placeholder name, which the
    body then referenced as `monkeypatch` — a NameError).
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
@pytest.fixture
def mock_hfh(monkeypatch):
    """Patch `datasets.inspect.huggingface_hub` with a minimal mock hub
    exposing `list_metrics()` over a fixed set of metric ids."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each metrics entry point must emit the FutureWarning pointing users
    to the `evaluate` library.

    Renamed with a `test_` prefix so pytest collects it; fixture parameters
    restored so pytest can resolve them (the obfuscated signature had five
    identically-named parameters — a SyntaxError).
    """
    if "tmp_path" in args:
        # Substitute the real tmp_path fixture value into the parametrized args.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
import math
import random
def lowerCAmelCase_(value, deriv=False) -> float:
    """Sigmoid activation 1/(1+e^-x); with ``deriv=True`` returns the
    derivative ``value * (1 - value)`` (`value` must then already be a
    sigmoid output).

    Parameter names restored: the original signature declared ``__A`` twice
    (a SyntaxError) while the body read `value`/`deriv`.
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value: fixed scalar fed to the single neuron on every step.
# NOTE(review): the trainer below refers to `INITIAL_VALUE`; this constant's
# obfuscated name (`UpperCamelCase__`) breaks that link — confirm the binding.
UpperCamelCase__ = 0.0_2
def lowerCAmelCase_(expected, number_propagations) -> float:
    """Train a single sigmoid neuron toward `expected` (0-100) for
    `number_propagations` steps; returns the final output scaled by 100.

    Non-deterministic: the starting weight is drawn at random per call.
    NOTE(review): `sigmoid_function` / `INITIAL_VALUE` are bound under
    obfuscated names elsewhere in this file — confirm those references.
    """
    # Random starting weight in [-99, 99] in steps of 2.
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta (gradient through the sigmoid)
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bind the interactive inputs to the names the final call actually reads
    # (the obfuscated version assigned both to the same throwaway name).
    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    # NOTE(review): `forward_propagation` is defined above under an obfuscated
    # name (`lowerCAmelCase_`) — confirm the intended target.
    print(forward_propagation(expected, number_propagations))
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger: the config class below calls `logger.info` / `logger.warning`,
# so this must be bound under the name `logger` (the obfuscated `a_` binding
# was immediately clobbered by the archive map below).
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class snake_case__(UpperCAmelCase_):
    """Configuration class for ESM models (HF `EsmConfig` port).

    Holds the transformer hyper-parameters plus, for folding models, an
    `EsmFoldConfig` and the amino-acid vocabulary list.  Parameter names
    restored from the HF EsmConfig signature (the obfuscated signature
    declared every parameter as `_snake_case` — a SyntaxError).
    """

    # Identifier used by the transformers config registry.
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, 'use_esm_attn_map', False):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')

    def to_dict(self):
        """Serialize to a plain dict, nesting the folding config when present."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output['esmfold_config'] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """ESMFold-specific settings nested inside the ESM config.

    Renamed from the obfuscated `snake_case__`: the config class above
    already instantiates it as `EsmFoldConfig()`.  Field names restored
    from the HF port (the obfuscated fields shared one name).
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept a missing or dict-form trunk config and normalize it.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Recursively serialize, replacing the trunk with its own dict form."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


# Preserve the original (obfuscated) binding for any external reference.
snake_case__ = EsmFoldConfig
@dataclass
class TrunkConfig:
    """Folding-trunk hyper-parameters (renamed from the obfuscated
    `snake_case__`; the EsmFoldConfig above instantiates `TrunkConfig()`).

    Field names restored from the HF port; __post_init__ validates the
    head-width / state-dim relationships.
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False

    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        # Normalize a missing or dict-form structure-module config.
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # NOTE(review): the two `x % x` checks below are always false; they
        # match the upstream HF code verbatim and look like they intended the
        # corresponding head widths — flagged, not changed.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}.")
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}.")

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Recursively serialize, nesting the structure module's dict form."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


# Preserve the original (obfuscated) binding for any external reference.
snake_case__ = TrunkConfig
@dataclass
class StructureModuleConfig:
    """Structure-module (IPA head) hyper-parameters.

    Renamed from the obfuscated `snake_case__`: the TrunkConfig above
    instantiates `StructureModuleConfig()`.  Field names restored from the
    HF port; values match the originals line-for-line.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize all fields to a plain dict."""
        return asdict(self)


# Preserve the original (obfuscated) binding for any external reference.
snake_case__ = StructureModuleConfig
def get_default_vocab_list():
    """Return the default ESM-2 amino-acid vocabulary (33 tokens).

    Renamed from the obfuscated `lowercase_`: the config class above calls
    `get_default_vocab_list()` when no vocab_list is supplied.
    """
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )


# Preserve the original (obfuscated) binding for any external reference.
lowercase_ = get_default_vocab_list
from __future__ import annotations
class Matrix:
    """A 2-D matrix of ints/floats built from equal-length rows.

    Supports determinant, minors, cofactors, adjugate, inverse, arithmetic
    operators, powers and row/column insertion.  Scalar multiplication
    truncates results to int (see __mul__).  Renamed from the obfuscated
    `A`: the methods below construct new instances as `Matrix(...)`.
    """

    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            # Every row must have the same width and only numeric entries.
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        """Return the matrix transposed, as a list of columns."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        """(rows, columns) shape of the matrix."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        """Identity matrix of the same row count as self."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        """Determinant via cofactor expansion along the first row (0 if not square)."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        # Invertible iff the determinant is non-zero (name kept as in the API).
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        """Determinant of the submatrix with `row` and `column` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        """Signed minor: (-1)^(row+column) * minor."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        """Append `row` (or insert at `position`) after validating type/length."""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        """Append `column` (or insert at `position`) after validating type/length."""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        # Scalar products are truncated to int, matching the original code.
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second")
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


# Preserve the original (obfuscated) class name for any external reference.
A = Matrix
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module's docstrings.
    doctest.testmod()
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_(ProcessorMixin):
    """Speech2Text processor: bundles a feature extractor and a tokenizer
    behind a single `__call__`/decode interface.

    The base class is `ProcessorMixin` (imported above); the obfuscated
    version made the class inherit from itself.  Duplicate `_A` parameter
    names (a SyntaxError) and lost attribute assignments restored.
    """

    # Names ProcessorMixin uses to (de)serialize the two sub-processors.
    feature_extractor_class = 'Speech2TextFeatureExtractor'
    tokenizer_class = 'Speech2TextTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Which sub-processor plain calls are routed to (legacy context manager).
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Forward audio to the feature extractor and/or text to the tokenizer.

        Returns the audio features, the text encodings, or — when both are
        given — the features with `labels` set to the tokenized input ids.
        """
        # Legacy path: inside `as_target_processor`, delegate everything.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            # First positional argument is treated as the audio input.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route plain calls to the tokenizer so
        labels can be processed; restored on exit."""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger plus the tokenizer's vocabulary-file constants.  The class
# below reads these as `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `BPE_TOKEN_MERGES`; the obfuscated version bound every one of them to the
# same repeatedly-overwritten name.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'tokenizer_config_file': 'tokenizer_config.json',
    'merges_file': 'merges.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
        ),
    },
    'tokenizer_config_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
        ),
    },
    'merges_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
        ),
    },
}

# End-of-word marker appended during BPE, and the continuation prefix.
# NOTE(review): `BPE_TOKEN_VOCAB` is not referenced in the visible code —
# name taken from the upstream tokenizer; confirm.
BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
def lowerCAmelCase_(word) -> set:
    """Return the set of adjacent symbol pairs in `word` (BPE helper).

    `word` is a sequence of symbols (e.g. a string or tuple of strings).
    Parameter renamed from the placeholder `__A` — the body already read
    `word` — and the wrong `-> str` annotation corrected to `set`.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
# Renamed so the class attribute reference below
# (`PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`) resolves.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1024}
class A(PreTrainedTokenizer):
    """
    Speech2Text2-style CTC/BPE tokenizer.

    Loads a JSON vocabulary (token -> id) and, optionally, a BPE
    ``merges.txt`` file.  Without a merges file the tokenizer can only be
    used for decoding, not encoding.

    NOTE(review): the original block was not runnable — ``__init__`` declared
    duplicate parameter names, every method shared one mangled name, the base
    class was an undefined identifier, and ``save_vocabulary`` referenced its
    sort lambda's parameter outside the lambda.  The conventional
    ``PreTrainedTokenizer`` method and parameter names are restored below
    (``PreTrainedTokenizer`` is already imported at the top of this file).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        """Load the vocabulary (and optional merges file) and set up BPE state."""
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        # Reverse mapping id -> token for decoding.
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            # Lower rank == merged earlier (more frequent pair).
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        """Size of the id space (derived from the decoder mapping)."""
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to a single token, caching the result."""
        # Mark the end of the word so word-final merges stay distinct.
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked known pair first; stop when no pair is known.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        # Mark intra-word boundaries with the continuation suffix ("@@ ").
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Whitespace-split *text* and BPE-encode each token (requires merges)."""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding.")

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Map a token string to its id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id back to its token string, falling back to the unk token."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join tokens into text, fusing pieces marked with the continuation suffix."""
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write ``vocab.json`` (and ``merges.txt`` when available) into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            # bpe_ranks maps merge-pair -> rank; emit pairs in rank order and
            # warn if the ranks are not a consecutive sequence.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
| 65 | 0 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    # Build the search query: CLI args joined with %20, otherwise prompt the user.
    __snake_case = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
    print("""Googling.....""")
    # NOTE(review): every intermediate below was collapsed onto one name by
    # automated renaming (query/url/res/link), so `{query}`, `url`, `res.text`
    # and the final `link` all reference undefined identifiers — this script
    # needs its original variable names restored before it can run.
    __snake_case = F'''https://www.google.com/search?q={query}&num=100'''
    # Fetch the results page with a randomized User-Agent to avoid trivial blocking.
    __snake_case = requests.get(
        url,
        headers={"""User-Agent""": str(UserAgent().random)},
    )
    try:
        # Primary layout: first organic result lives in a div with class "yuRUbf".
        __snake_case = (
            BeautifulSoup(res.text, """html.parser""")
            .find("""div""", attrs={"""class""": """yuRUbf"""})
            .find("""a""")
            .get("""href""")
        )
    except AttributeError:
        # Fallback layout ("kCrYT") wraps the target URL inside a redirect
        # query string, so extract the "url" parameter instead.
        __snake_case = parse_qs(
            BeautifulSoup(res.text, """html.parser""")
            .find("""div""", attrs={"""class""": """kCrYT"""})
            .find("""a""")
            .get("""href""")
        )["""url"""][0]
    # Open the first result in the default browser.
    webbrowser.open(link)
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A(UpperCAmelCase_):
    """Output container for a decoder forward pass (single tensor field)."""

    # NOTE(review): the field name was mangled by automated renaming; in
    # diffusers this field is conventionally called ``sample`` — confirm
    # against the original file before relying on attribute access.  The
    # base-class name is likewise mangled (presumably ``BaseOutput``).
    __UpperCAmelCase: torch.FloatTensor
class A(nn.Module):
    """
    Convolutional encoder (diffusers VAE style): ``conv_in`` -> configurable
    down blocks -> a UNet mid block -> GroupNorm + SiLU -> ``conv_out``.

    NOTE(review): automated renaming left duplicate parameter names and a
    single rebound local everywhere, so this block is not runnable as
    written; the comments describe the apparent intended structure only.
    """

    def __init__(self : Union[str, Any] , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=3 , __UpperCAmelCase : Optional[Any]=("DownEncoderBlock2D",) , __UpperCAmelCase : int=(6_4,) , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : Any=3_2 , __UpperCAmelCase : str="silu" , __UpperCAmelCase : Any=True , ) -> Dict:
        """Construct the encoder stack (conv_in, down blocks, mid block, conv_out)."""
        super().__init__()
        UpperCAmelCase__ = layers_per_block
        # Input convolution: in_channels -> block_out_channels[0], 3x3, stride 1.
        UpperCAmelCase__ = torch.nn.Convad(
            __UpperCAmelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        UpperCAmelCase__ = None
        UpperCAmelCase__ = nn.ModuleList([] )
        # down
        UpperCAmelCase__ = block_out_channels[0]
        for i, down_block_type in enumerate(__UpperCAmelCase ):
            UpperCAmelCase__ = output_channel
            UpperCAmelCase__ = block_out_channels[i]
            # The final block keeps spatial resolution (no downsample).
            UpperCAmelCase__ = i == len(__UpperCAmelCase ) - 1
            UpperCAmelCase__ = get_down_block(
                __UpperCAmelCase , num_layers=self.layers_per_block , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=__UpperCAmelCase , resnet_groups=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
            self.down_blocks.append(__UpperCAmelCase )
        # mid
        UpperCAmelCase__ = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
        # out
        UpperCAmelCase__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__UpperCAmelCase , eps=1E-6 )
        UpperCAmelCase__ = nn.SiLU()
        # Double the channel count when mean and log-variance are predicted jointly.
        UpperCAmelCase__ = 2 * out_channels if double_z else out_channels
        UpperCAmelCase__ = nn.Convad(block_out_channels[-1] , __UpperCAmelCase , 3 , padding=1 )
        UpperCAmelCase__ = False

    def lowercase_ (self : List[Any] , __UpperCAmelCase : int ) -> str:
        """Encode *x*: conv_in, down blocks, mid block, norm/act, conv_out."""
        UpperCAmelCase__ = x
        UpperCAmelCase__ = self.conv_in(__UpperCAmelCase )
        if self.training and self.gradient_checkpointing:
            # Wrapper so checkpointing can call an arbitrary module.
            def create_custom_forward(__UpperCAmelCase : int ):
                def custom_forward(*__UpperCAmelCase : Optional[Any] ):
                    return module(*__UpperCAmelCase )

                return custom_forward

            # down
            if is_torch_version(">=" , "1.11.0" ):
                # `use_reentrant` only exists from torch 1.11 onwards.
                for down_block in self.down_blocks:
                    UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
                # middle
                UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
            else:
                for down_block in self.down_blocks:
                    UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase )
                # middle
                UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __UpperCAmelCase )
        else:
            # down
            for down_block in self.down_blocks:
                UpperCAmelCase__ = down_block(__UpperCAmelCase )
            # middle
            UpperCAmelCase__ = self.mid_block(__UpperCAmelCase )
        # post-process
        UpperCAmelCase__ = self.conv_norm_out(__UpperCAmelCase )
        UpperCAmelCase__ = self.conv_act(__UpperCAmelCase )
        UpperCAmelCase__ = self.conv_out(__UpperCAmelCase )
        return sample
class A(nn.Module):
    """
    Convolutional decoder (diffusers VAE style): ``conv_in`` -> UNet mid
    block -> configurable up blocks -> norm (GroupNorm or SpatialNorm) +
    SiLU -> ``conv_out``.

    NOTE(review): parameter and local names were destroyed by automated
    renaming (duplicate parameter names, one rebound local), so this block
    is not runnable as written; comments describe the apparent structure.
    """

    def __init__(self : List[Any] , __UpperCAmelCase : str=3 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : Optional[int]=("UpDecoderBlock2D",) , __UpperCAmelCase : str=(6_4,) , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : Tuple=3_2 , __UpperCAmelCase : Any="silu" , __UpperCAmelCase : Any="group" , ) -> Dict:
        """Construct the decoder stack; norm_type selects GroupNorm vs SpatialNorm."""
        super().__init__()
        UpperCAmelCase__ = layers_per_block
        # Input convolution: latent channels -> widest block width.
        UpperCAmelCase__ = nn.Convad(
            __UpperCAmelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        UpperCAmelCase__ = None
        UpperCAmelCase__ = nn.ModuleList([] )
        # SpatialNorm conditions on the latent, so it needs temb channels.
        UpperCAmelCase__ = in_channels if norm_type == "spatial" else None
        # mid
        UpperCAmelCase__ = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
        # up (channel widths are traversed widest-first)
        UpperCAmelCase__ = list(reversed(__UpperCAmelCase ) )
        UpperCAmelCase__ = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(__UpperCAmelCase ):
            UpperCAmelCase__ = output_channel
            UpperCAmelCase__ = reversed_block_out_channels[i]
            # Final block keeps resolution (no upsample).
            UpperCAmelCase__ = i == len(__UpperCAmelCase ) - 1
            UpperCAmelCase__ = get_up_block(
                __UpperCAmelCase , num_layers=self.layers_per_block + 1 , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , prev_output_channel=__UpperCAmelCase , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , resnet_groups=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , temb_channels=__UpperCAmelCase , resnet_time_scale_shift=__UpperCAmelCase , )
            self.up_blocks.append(__UpperCAmelCase )
            UpperCAmelCase__ = output_channel
        # out
        if norm_type == "spatial":
            UpperCAmelCase__ = SpatialNorm(block_out_channels[0] , __UpperCAmelCase )
        else:
            UpperCAmelCase__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__UpperCAmelCase , eps=1E-6 )
        UpperCAmelCase__ = nn.SiLU()
        UpperCAmelCase__ = nn.Convad(block_out_channels[0] , __UpperCAmelCase , 3 , padding=1 )
        UpperCAmelCase__ = False

    def lowercase_ (self : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=None ) -> List[Any]:
        """Decode latent *z* (optionally conditioned on latent_embeds for SpatialNorm)."""
        UpperCAmelCase__ = z
        UpperCAmelCase__ = self.conv_in(__UpperCAmelCase )
        # Keep the sample in the same dtype as the up-block parameters.
        UpperCAmelCase__ = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:
            # Wrapper so checkpointing can call an arbitrary module.
            def create_custom_forward(__UpperCAmelCase : str ):
                def custom_forward(*__UpperCAmelCase : List[str] ):
                    return module(*__UpperCAmelCase )
                return custom_forward

            if is_torch_version(">=" , "1.11.0" ):
                # middle (`use_reentrant` only exists from torch 1.11 onwards)
                UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , __UpperCAmelCase , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
                UpperCAmelCase__ = sample.to(__UpperCAmelCase )
                # up
                for up_block in self.up_blocks:
                    UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
            else:
                # middle
                UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , __UpperCAmelCase , __UpperCAmelCase )
                UpperCAmelCase__ = sample.to(__UpperCAmelCase )
                # up
                for up_block in self.up_blocks:
                    UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
        else:
            # middle
            UpperCAmelCase__ = self.mid_block(__UpperCAmelCase , __UpperCAmelCase )
            UpperCAmelCase__ = sample.to(__UpperCAmelCase )
            # up
            for up_block in self.up_blocks:
                UpperCAmelCase__ = up_block(__UpperCAmelCase , __UpperCAmelCase )
        # post-process
        if latent_embeds is None:
            UpperCAmelCase__ = self.conv_norm_out(__UpperCAmelCase )
        else:
            UpperCAmelCase__ = self.conv_norm_out(__UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase__ = self.conv_act(__UpperCAmelCase )
        UpperCAmelCase__ = self.conv_out(__UpperCAmelCase )
        return sample
class A(nn.Module):
    """
    Vector quantizer (VQ-VAE style): snaps encoder outputs to their nearest
    codebook embedding and computes the commitment loss, with optional index
    remapping to a reduced codebook.

    NOTE(review): parameter/local names were mangled by automated renaming
    (duplicate parameter names, one rebound local), so this block is not
    runnable as written; comments describe the apparent structure.
    """

    def __init__(self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Union[str, Any]="random" , __UpperCAmelCase : Dict=False , __UpperCAmelCase : Union[str, Any]=True ) -> Dict:
        """Set up the n_e x vq_embed_dim codebook and optional index remapping."""
        super().__init__()
        UpperCAmelCase__ = n_e
        UpperCAmelCase__ = vq_embed_dim
        # beta weights the commitment term of the VQ loss.
        UpperCAmelCase__ = beta
        UpperCAmelCase__ = legacy
        # Codebook initialised uniformly in [-1/n_e, 1/n_e].
        UpperCAmelCase__ = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        UpperCAmelCase__ = remap
        if self.remap is not None:
            # `remap` is a path to a .npy array of codebook indices to keep.
            self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
            UpperCAmelCase__ = self.used.shape[0]
            UpperCAmelCase__ = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                # Reserve one extra slot for unknown indices.
                UpperCAmelCase__ = self.re_embed
                UpperCAmelCase__ = self.re_embed + 1
            print(
                f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
                f"""Using {self.unknown_index} for unknown indices.""" )
        else:
            UpperCAmelCase__ = n_e
        UpperCAmelCase__ = sane_index_shape

    def lowercase_ (self : str , __UpperCAmelCase : str ) -> List[str]:
        """Map raw codebook indices into the reduced ("used") index space."""
        UpperCAmelCase__ = inds.shape
        assert len(__UpperCAmelCase ) > 1
        UpperCAmelCase__ = inds.reshape(ishape[0] , -1 )
        UpperCAmelCase__ = self.used.to(__UpperCAmelCase )
        # One-hot match of each index against the kept ("used") indices.
        UpperCAmelCase__ = (inds[:, :, None] == used[None, None, ...]).long()
        UpperCAmelCase__ = match.argmax(-1 )
        # Indices with no match in `used` are unknown.
        UpperCAmelCase__ = match.sum(2 ) < 1
        if self.unknown_index == "random":
            UpperCAmelCase__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            UpperCAmelCase__ = self.unknown_index
        return new.reshape(__UpperCAmelCase )

    def lowercase_ (self : Tuple , __UpperCAmelCase : Optional[int] ) -> Dict:
        """Map reduced ("used") indices back to raw codebook indices."""
        UpperCAmelCase__ = inds.shape
        assert len(__UpperCAmelCase ) > 1
        UpperCAmelCase__ = inds.reshape(ishape[0] , -1 )
        UpperCAmelCase__ = self.used.to(__UpperCAmelCase )
        if self.re_embed > self.used.shape[0]:  # extra token
            UpperCAmelCase__ = 0  # simply set to zero
        UpperCAmelCase__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __UpperCAmelCase )
        return back.reshape(__UpperCAmelCase )

    def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Dict ) -> List[str]:
        """Quantize *z* (NCHW): returns (z_q, vq loss, (perplexity, encodings, indices))."""
        # NCHW -> NHWC, then flatten to (N*H*W, vq_embed_dim).
        UpperCAmelCase__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
        UpperCAmelCase__ = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        UpperCAmelCase__ = torch.argmin(torch.cdist(__UpperCAmelCase , self.embedding.weight ) , dim=1 )
        UpperCAmelCase__ = self.embedding(__UpperCAmelCase ).view(z.shape )
        UpperCAmelCase__ = None
        UpperCAmelCase__ = None
        # compute loss for embedding (legacy flag swaps which term gets beta)
        if not self.legacy:
            UpperCAmelCase__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            UpperCAmelCase__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients (straight-through estimator)
        UpperCAmelCase__ = z + (z_q - z).detach()
        # reshape back to match original input shape
        UpperCAmelCase__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            UpperCAmelCase__ = min_encoding_indices.reshape(z.shape[0] , -1 )  # add batch axis
            UpperCAmelCase__ = self.remap_to_used(__UpperCAmelCase )
            UpperCAmelCase__ = min_encoding_indices.reshape(-1 , 1 )  # flatten
        if self.sane_index_shape:
            UpperCAmelCase__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def lowercase_ (self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] ) -> Any:
        """Look up codebook vectors for given indices, undoing remapping if active."""
        if self.remap is not None:
            UpperCAmelCase__ = indices.reshape(shape[0] , -1 )  # add batch axis
            UpperCAmelCase__ = self.unmap_to_all(__UpperCAmelCase )
            UpperCAmelCase__ = indices.reshape(-1 )  # flatten again
        # get quantized latent vectors
        UpperCAmelCase__ = self.embedding(__UpperCAmelCase )
        if shape is not None:
            UpperCAmelCase__ = z_q.view(__UpperCAmelCase )
            # reshape back to match original input shape
            UpperCAmelCase__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q
class A(UpperCAmelCase_):
    """
    Diagonal Gaussian over latents, parameterised by channel-concatenated
    (mean, logvar); supports sampling, KL divergence, NLL, and mode.

    NOTE(review): local names were mangled by automated renaming (one
    rebound local), so this block is not runnable as written; the comments
    describe the apparent structure.
    """

    def __init__(self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : str=False ) -> Tuple:
        """Split parameters into mean/logvar; `deterministic` forces zero variance."""
        UpperCAmelCase__ = parameters
        # Channel dim 1 carries [mean, logvar] stacked.
        UpperCAmelCase__ , UpperCAmelCase__ = torch.chunk(__UpperCAmelCase , 2 , dim=1 )
        # Clamp logvar for numerical stability of exp().
        UpperCAmelCase__ = torch.clamp(self.logvar , -30.0 , 20.0 )
        UpperCAmelCase__ = deterministic
        UpperCAmelCase__ = torch.exp(0.5 * self.logvar )
        UpperCAmelCase__ = torch.exp(self.logvar )
        if self.deterministic:
            # Zero std/var so sample() degenerates to the mean.
            UpperCAmelCase__ = UpperCAmelCase__ = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )

    def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Optional[torch.Generator] = None ) -> torch.FloatTensor:
        """Reparameterised sample: mean + std * eps."""
        # make sure sample is on the same device as the parameters and has same dtype
        UpperCAmelCase__ = randn_tensor(
            self.mean.shape , generator=__UpperCAmelCase , device=self.parameters.device , dtype=self.parameters.dtype )
        UpperCAmelCase__ = self.mean + self.std * sample
        return x

    def lowercase_ (self : str , __UpperCAmelCase : int=None ) -> Any:
        """KL divergence vs the standard normal (other=None) or another diagonal Gaussian."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )

    def lowercase_ (self : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any=[1, 2, 3] ) -> Dict:
        """Negative log-likelihood of *sample* under this Gaussian, summed over dims."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        UpperCAmelCase__ = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__UpperCAmelCase )

    def lowercase_ (self : Tuple ) -> Optional[Any]:
        """Distribution mode (the mean, for a Gaussian)."""
        return self.mean
| 65 | 0 |
"""simple docstring"""
def __lowercase(grid, row, col, visit):
    """
    Count simple paths from (row, col) to the bottom-right cell of *grid*,
    moving in the four cardinal directions, never revisiting a cell, and
    treating cells equal to 1 as walls.

    grid:  rectangular list of lists of 0 (open) / 1 (blocked)
    visit: set of (row, col) cells already on the current path (mutated and
           restored during the search)

    NOTE(review): the original declared four parameters with one name, bound
    both grid dimensions to one local, and recursed on an undefined name;
    this restores the standard backtracking DFS.
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the current path, or blocked: dead end.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    # Reached the target corner: exactly one path ends here.
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += __lowercase(grid, row + 1, col, visit)
    count += __lowercase(grid, row - 1, col, visit)
    count += __lowercase(grid, row, col + 1, visit)
    count += __lowercase(grid, row, col - 1, visit)

    # Backtrack so sibling branches may reuse this cell.
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowerCAmelCase_(key, default=False):
    """
    Parse a boolean flag from the environment variable *key*.

    Returns *default* when the variable is unset; otherwise returns 1/0 for
    truthy/falsy values ("yes"/"no", "true"/"false", "on"/"off", "1"/"0",
    case-insensitive), matching ``distutils.util.strtobool``.

    Raises ValueError when the variable is set to an unrecognised value.

    NOTE(review): the original declared two parameters with the same name (a
    SyntaxError) and relied on ``distutils.util.strtobool``, which was
    removed in Python 3.12; the equivalent parsing is inlined here.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False (as 1/0, like strtobool).
        normalized = value.lower()
        if normalized in ("y", "yes", "t", "true", "on", "1"):
            _value = 1
        elif normalized in ("n", "no", "f", "false", "off", "0"):
            _value = 0
        else:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""")
    return _value
UpperCamelCase__ = parse_flag_from_env('RUN_SLOW', default=False)
UpperCamelCase__ = parse_flag_from_env('RUN_REMOTE', default=False)
UpperCamelCase__ = parse_flag_from_env('RUN_LOCAL', default=True)
UpperCamelCase__ = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
UpperCamelCase__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
UpperCamelCase__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
UpperCamelCase__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
UpperCamelCase__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
UpperCamelCase__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
UpperCamelCase__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
UpperCamelCase__ = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def lowerCAmelCase_ ( __A ) -> Any:
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
UpperCAmelCase__ = unittest.skip("test requires faiss" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> Optional[Any]:
'''simple docstring'''
try:
import regex # noqa
except ImportError:
UpperCAmelCase__ = unittest.skip("test requires regex" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> List[str]:
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
UpperCAmelCase__ = unittest.skip("test requires elasticsearch" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> List[Any]:
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
UpperCAmelCase__ = unittest.skip("test requires sqlalchemy" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> List[str]:
'''simple docstring'''
if not config.TORCH_AVAILABLE:
UpperCAmelCase__ = unittest.skip("test requires PyTorch" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> Union[str, Any]:
'''simple docstring'''
if not config.TF_AVAILABLE:
UpperCAmelCase__ = unittest.skip("test requires TensorFlow" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> Any:
'''simple docstring'''
if not config.JAX_AVAILABLE:
UpperCAmelCase__ = unittest.skip("test requires JAX" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> int:
'''simple docstring'''
if not config.PIL_AVAILABLE:
UpperCAmelCase__ = unittest.skip("test requires Pillow" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> Tuple:
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers" )(__A )
else:
return test_case
def lowerCAmelCase_ ( __A ) -> Dict:
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken" )(__A )
else:
return test_case
def lowerCAmelCase_ ( __A ) -> Optional[Any]:
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy" )(__A )
else:
return test_case
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
def _require_spacy_model(__A ):
try:
import spacy # noqa F401
spacy.load(__A )
except ImportError:
return unittest.skip("test requires spacy" )(__A )
except OSError:
return unittest.skip("test requires spacy model '{}'".format(__A ) )(__A )
else:
return test_case
return _require_spacy_model
def lowerCAmelCase_ ( __A ) -> Optional[Any]:
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark" )(__A )
else:
return test_case
def lowerCAmelCase_ ( __A ) -> Tuple:
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark" )(__A )
else:
return test_case
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
UpperCAmelCase__ = unittest.skip("test is slow" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> List[Any]:
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
UpperCAmelCase__ = unittest.skip("test is local" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> Optional[Any]:
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCAmelCase__ = unittest.skip("test is packaged" )(__A )
return test_case
def lowerCAmelCase_ ( __A ) -> Any:
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
UpperCAmelCase__ = unittest.skip("test requires remote" )(__A )
return test_case
def lowerCAmelCase_ ( *__A ) -> Optional[int]:
'''simple docstring'''
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(__A ) and name.startswith("test" ):
for decorator in decorators:
UpperCAmelCase__ = decorator(__A )
setattr(cls, __A, __A )
return cls
return decorate
class A ( UpperCAmelCase_ ):
    """Marker error raised when an offline-mode request would hang forever.

    NOTE(review): the base-class name was mangled by renaming; it is
    presumably an Exception subclass — confirm against the original file.
    """

    pass
class A ( UpperCAmelCase_ ):
    """Offline-simulation modes consumed by the offline() context manager.

    NOTE(review): member names and the base class (presumably ``enum.Enum``)
    were mangled by renaming — confirm against the original file.
    """

    # Mode 0: requests raise ConnectionError immediately.
    __UpperCAmelCase : Union[str, Any] = 0
    # Mode 1: requests hang until the configured timeout fires.
    __UpperCAmelCase : str = 1
    # Mode 2: simulate HF_DATASETS_OFFLINE=1 behaviour.
    __UpperCAmelCase : int = 2
@contextmanager
def lowerCAmelCase_ ( __A=OfflineSimulationMode.CONNECTION_FAILS, __A=1e-16 ) -> List[str]:
'''simple docstring'''
UpperCAmelCase__ = requests.Session().request
def timeout_request(__A, __A, __A, **__A ):
# Change the url to an invalid url so that the connection hangs
UpperCAmelCase__ = "https://10.255.255.1"
if kwargs.get("timeout" ) is None:
raise RequestWouldHangIndefinitelyError(
f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
UpperCAmelCase__ = timeout
try:
return online_request(__A, __A, **__A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCAmelCase__ = url
UpperCAmelCase__ = e.args[0]
UpperCAmelCase__ = (max_retry_error.args[0].replace("10.255.255.1", f"""OfflineMock[{url}]""" ),)
UpperCAmelCase__ = (max_retry_error,)
raise
def raise_connection_error(__A, __A, **__A ):
raise requests.ConnectionError("Offline mode is enabled.", request=__A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("requests.Session.send", __A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("requests.Session.request", __A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("datasets.config.HF_DATASETS_OFFLINE", __A ):
yield
else:
raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def lowerCAmelCase_ ( *__A, **__A ) -> str:
'''simple docstring'''
UpperCAmelCase__ = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__A, **__A ) as tmp_dir:
try:
os.chdir(__A )
yield
finally:
os.chdir(__A )
@contextmanager
def lowerCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
import gc
gc.collect()
UpperCAmelCase__ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowerCAmelCase_ ( ) -> List[str]:
'''simple docstring'''
import gc
gc.collect()
UpperCAmelCase__ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowerCAmelCase_ ( __A, __A ) -> List[str]:
'''simple docstring'''
return deepcopy(__A ).integers(0, 100, 10 ).tolist() == deepcopy(__A ).integers(0, 100, 10 ).tolist()
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(__A, *__A, **__A ):
try:
return func(*__A, **__A )
except HTTPError as err:
if str(__A ).startswith("500" ) or str(__A ).startswith("502" ):
pytest.xfail(str(__A ) )
raise err
return decorator.decorator(_wrapper, __A )
class A :
    """Record of a finished subprocess: return code plus captured output.

    NOTE(review): the original ``__init__`` declares duplicate parameter
    names and binds all three values to one local — the intended fields are
    ``returncode``, ``stdout`` and ``stderr`` (see the callers below).
    """

    def __init__(self : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : List[str] ) -> Union[str, Any]:
        """Store the process exit code and the collected stdout/stderr lines."""
        UpperCAmelCase__ = returncode
        UpperCAmelCase__ = stdout
        UpperCAmelCase__ = stderr
async def lowerCAmelCase_ ( __A, __A ) -> Optional[int]:
'''simple docstring'''
while True:
UpperCAmelCase__ = await stream.readline()
if line:
callback(__A )
else:
break
async def lowerCAmelCase_ ( __A, __A=None, __A=None, __A=None, __A=False, __A=False ) -> _RunOutput:
'''simple docstring'''
if echo:
print("\nRunning: ", " ".join(__A ) )
UpperCAmelCase__ = await asyncio.create_subprocess_exec(
cmd[0], *cmd[1:], stdin=__A, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=__A, )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCAmelCase__ = []
UpperCAmelCase__ = []
def tee(__A, __A, __A, __A="" ):
UpperCAmelCase__ = line.decode("utf-8" ).rstrip()
sink.append(__A )
if not quiet:
print(__A, __A, file=__A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda __A : tee(__A, __A, sys.stdout, label="stdout:" ) ),
_read_stream(p.stderr, lambda __A : tee(__A, __A, sys.stderr, label="stderr:" ) ),
], timeout=__A, )
return _RunOutput(await p.wait(), __A, __A )
def lowerCAmelCase_ ( __A, __A=None, __A=None, __A=180, __A=False, __A=True ) -> _RunOutput:
'''simple docstring'''
UpperCAmelCase__ = asyncio.get_event_loop()
UpperCAmelCase__ = loop.run_until_complete(
_stream_subprocess(__A, env=__A, stdin=__A, timeout=__A, quiet=__A, echo=__A ) )
UpperCAmelCase__ = " ".join(__A )
if result.returncode > 0:
UpperCAmelCase__ = "\n".join(result.stderr )
raise RuntimeError(
f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"""'{cmd_str}' produced no output.""" )
return result
def lowerCAmelCase_ ( ) -> int:
    '''
    Return the numeric id of the current pytest-xdist worker (e.g. 3 for
    "gw3"); defaults to 0 when not running under pytest-xdist.

    Fix: the return annotation claimed ``Tuple`` although an int is returned.
    '''
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0" )
    # Strip the "gw" prefix that pytest-xdist puts in front of the worker id.
    worker = re.sub(r"^gw", "", worker, 0, re.M )
    return int(worker )
def lowerCAmelCase_ ( ) -> int:
    '''
    Return a torch.distributed master port unique to this pytest-xdist worker
    (base 29500 + worker id) so parallel test workers don't collide.

    Fix: the return annotation claimed ``List[Any]`` although an int is
    returned.
    NOTE(review): ``pytest_xdist_worker_id`` is not defined under that name in
    this module (the helper above was renamed by obfuscation) — confirm the
    intended callee.
    '''
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 65 | 0 |
from __future__ import annotations
class __UpperCAmelCase :
    """
    List-of-rows matrix of ints/floats with determinant, minors/cofactors,
    adjugate, inverse, row/column insertion and the usual arithmetic operators.

    NOTE(review) — obfuscation damage in this class, documented rather than
    silently altered:
    * most methods share the name ``__magic_name__``; each later definition
      shadows the previous one, so only the last plain method (and last
      property) of that name survives on the class;
    * references to ``__UpperCAmelCase``/``__A`` inside method bodies undergo
      private-name mangling against this class name and will not resolve;
    * several methods reuse ``__A`` for two parameters, which is a SyntaxError;
    * factory calls reference ``Matrix``, a name not defined in this module;
    * validated state in ``__init__`` (and elsewhere) is bound to the throwaway
      local ``UpperCAmelCase`` instead of attributes such as ``self.rows``.
    """

    def __init__( self : Union[str, Any], __A : list[list[int]] ):
        # One shared TypeError reused for every construction-validation failure.
        UpperCAmelCase : int = TypeError(
            '''Matrices must be formed from a list of zero or more lists containing at '''
            '''least one and the same number of values, each of which must be of type '''
            '''int or float.''' )
        if len(__UpperCAmelCase ) != 0:
            UpperCAmelCase : Optional[Any] = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(__UpperCAmelCase ) != cols:
                    raise error
                for value in row:
                    if not isinstance(__UpperCAmelCase, (int, float) ):
                        raise error
            UpperCAmelCase : Any = rows
        else:
            UpperCAmelCase : Dict = []

    # columns(): transpose view of the stored rows.
    def __magic_name__ ( self : Any ):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]

    # num_rows
    @property
    def __magic_name__ ( self : Any ):
        return len(self.rows )

    # num_columns (width of the first row)
    @property
    def __magic_name__ ( self : Union[str, Any] ):
        return len(self.rows[0] )

    # order: (num_rows, num_columns)
    @property
    def __magic_name__ ( self : List[Any] ):
        return (self.num_rows, self.num_columns)

    # is_square
    @property
    def __magic_name__ ( self : Tuple ):
        return self.order[0] == self.order[1]

    # identity(): identity matrix with this matrix's row count.
    def __magic_name__ ( self : Any ):
        UpperCAmelCase : Tuple = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(__UpperCAmelCase )

    # determinant(): 0 for non-square; direct formulas for orders <= 2,
    # Laplace expansion along the first row otherwise.
    def __magic_name__ ( self : int ):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0] )
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns ) )

    # is_invertable(): non-zero determinant (original spelling kept).
    def __magic_name__ ( self : Tuple ):
        return bool(self.determinant() )

    # get_minor(row, column): determinant of the submatrix without that row/col.
    def __magic_name__ ( self : Dict, __A : int, __A : int ):
        UpperCAmelCase : Dict = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(__UpperCAmelCase ).determinant()

    # get_cofactor(row, column): minor with the checkerboard sign applied.
    def __magic_name__ ( self : int, __A : int, __A : int ):
        if (row + column) % 2 == 0:
            return self.get_minor(__UpperCAmelCase, __UpperCAmelCase )
        return -1 * self.get_minor(__UpperCAmelCase, __UpperCAmelCase )

    # minors(): matrix of minors.
    def __magic_name__ ( self : Union[str, Any] ):
        return Matrix(
            [
                [self.get_minor(__UpperCAmelCase, __UpperCAmelCase ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )

    # cofactors(): minors with alternating signs.
    def __magic_name__ ( self : List[str] ):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns )
                ]
                for row in range(self.minors().num_rows )
            ] )

    # adjugate(): transpose of the cofactor matrix.
    def __magic_name__ ( self : Optional[Any] ):
        UpperCAmelCase : Optional[Any] = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(__UpperCAmelCase )

    # inverse(): adjugate / determinant; TypeError when the determinant is 0.
    def __magic_name__ ( self : List[Any] ):
        UpperCAmelCase : Optional[int] = self.determinant()
        if not determinant:
            raise TypeError('''Only matrices with a non-zero determinant have an inverse''' )
        return self.adjugate() * (1 / determinant)

    def __repr__( self : Dict ):
        return str(self.rows )

    def __str__( self : Optional[Any] ):
        # NOTE(review): the single-row branch joins the *characters* of
        # str(rows[0]) rather than the row's values — likely unintended.
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '''[''' + '''. '''.join([str(__UpperCAmelCase ) for value in row] ) + '''.]'''
                    for row in self.rows
                ] )
            + "]"
        )

    # add_row(row, position=None): validate the row, then append or insert it.
    def __magic_name__ ( self : Optional[int], __A : list[int], __A : int | None = None ):
        UpperCAmelCase : Optional[Any] = TypeError('''Row must be a list containing all ints and/or floats''' )
        if not isinstance(__UpperCAmelCase, __UpperCAmelCase ):
            raise type_error
        for value in row:
            if not isinstance(__UpperCAmelCase, (int, float) ):
                raise type_error
        if len(__UpperCAmelCase ) != self.num_columns:
            raise ValueError(
                '''Row must be equal in length to the other rows in the matrix''' )
        if position is None:
            self.rows.append(__UpperCAmelCase )
        else:
            UpperCAmelCase : Optional[int] = self.rows[0:position] + [row] + self.rows[position:]

    # add_column(column, position=None): validate the column, then append or
    # insert its entries into every row.
    def __magic_name__ ( self : Union[str, Any], __A : list[int], __A : int | None = None ):
        UpperCAmelCase : Optional[int] = TypeError(
            '''Column must be a list containing all ints and/or floats''' )
        if not isinstance(__UpperCAmelCase, __UpperCAmelCase ):
            raise type_error
        for value in column:
            if not isinstance(__UpperCAmelCase, (int, float) ):
                raise type_error
        if len(__UpperCAmelCase ) != self.num_rows:
            raise ValueError(
                '''Column must be equal in length to the other columns in the matrix''' )
        if position is None:
            UpperCAmelCase : str = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            UpperCAmelCase : Tuple = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]

    def __eq__( self : Any, __A : object ):
        # Matrices compare equal when their row lists are equal.
        if not isinstance(__UpperCAmelCase, __UpperCAmelCase ):
            return NotImplemented
        return self.rows == other.rows

    def __ne__( self : int, __A : object ):
        return not self == other

    def __neg__( self : Dict ):
        return self * -1

    def __add__( self : Dict, __A : Matrix ):
        # Element-wise sum; orders must match.
        if self.order != other.order:
            raise ValueError('''Addition requires matrices of the same order''' )
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )

    def __sub__( self : Optional[Any], __A : Matrix ):
        # Element-wise difference; orders must match.
        if self.order != other.order:
            raise ValueError('''Subtraction requires matrices of the same order''' )
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )

    def __mul__( self : Tuple, __A : Matrix | int | float ):
        # Scalar multiply (note: each element is truncated through int()),
        # or matrix multiply via row·column dot products.
        if isinstance(__UpperCAmelCase, (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(__UpperCAmelCase, __UpperCAmelCase ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    '''The number of columns in the first matrix must '''
                    '''be equal to the number of rows in the second''' )
            return Matrix(
                [
                    [Matrix.dot_product(__UpperCAmelCase, __UpperCAmelCase ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                '''A Matrix can only be multiplied by an int, float, or another matrix''' )

    def __pow__( self : List[Any], __A : int ):
        # Integer powers via repeated multiplication; 0 -> identity,
        # negative exponents use the inverse (when invertible).
        if not isinstance(__UpperCAmelCase, __UpperCAmelCase ):
            raise TypeError('''A Matrix can only be raised to the power of an int''' )
        if not self.is_square:
            raise ValueError('''Only square matrices can be raised to a power''' )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                '''Only invertable matrices can be raised to a negative power''' )
        UpperCAmelCase : Optional[Any] = self
        for _ in range(other - 1 ):
            result *= self
        return result

    @classmethod
    def __magic_name__ ( cls : Dict, __A : list[int], __A : list[int] ):
        # dot_product of a row and a column (equal lengths assumed).
        return sum(row[i] * column[i] for i in range(len(__UpperCAmelCase ) ) )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
def lowerCAmelCase_ ( stra, strb ) -> float:
    """
    Jaro–Winkler similarity of two strings, a value in [0, 1].

    Fix: the obfuscated original reused one name for both parameters (a
    SyntaxError) and for both halves of several tuple unpackings, destroying
    the algorithm; the canonical Jaro–Winkler computation is restored.
    """

    def get_matched_characters(_stra: str, _strb: str) -> str:
        # Characters of _stra matching a character of _strb within the Jaro
        # window; each match in _strb is blanked out so it can't be reused.
        matched = []
        limit = min(len(_stra ), len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0, i - limit ) )
            right = int(min(i + limit + 1, len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = f"""{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"""
        return "".join(matched )

    # matching characters
    matching_a = get_matched_characters(stra, strb )
    matching_b = get_matched_characters(strb, stra )
    match_count = len(matching_a )

    # transposition: half the number of positions where the matched
    # sequences disagree.
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b ) if ca != cb] ) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters (the Winkler boost)
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the original called `jaro_winkler`, a name that does not exist in
    # this (obfuscated) module; call the function defined above instead.
    print(lowerCAmelCase_('hello', 'world'))
| 65 | 0 |
"""simple docstring"""
def _lowerCamelCase( a , a ):
while b:
__a , __a = b, a % b
return a
def _lowerCamelCase( a , a ):
return a if b == 0 else euclidean_gcd_recursive(__A , a % b )
def _lowerCamelCase( ):
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
    # Fix: the original called `main()`, which is not defined in this
    # obfuscated module; the zero-argument demo defined directly above is the
    # intended entry point.
    _lowerCamelCase()
| 261 | def lowerCAmelCase_ ( __A, __A ) -> None:
'''simple docstring'''
UpperCAmelCase__ = len(__A )
print("The following activities are selected:" )
# The first activity is always selected
UpperCAmelCase__ = 0
print(__A, end="," )
# Consider rest of the activities
for j in range(__A ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(__A, end="," )
UpperCAmelCase__ = j
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the original bound both lists to the same obfuscated name (the
    # second assignment overwrote the first) and then called the undefined
    # `print_max_activities(start, finish)`.
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    lowerCAmelCase_(start, finish)
| 65 | 0 |
"""simple docstring"""
def lowerCAmelCase__ ( UpperCamelCase__ ):
    '''
    Number of integer partitions of m = UpperCamelCase__ (m >= 1), computed by
    the classic DP over the largest allowed part size.

    Fixes: the original indexed ``range(__A)`` / ``range(1, __A)`` with an
    undefined placeholder, and the base-case initialisation had lost its
    assignment target (``memo[i][0] = 1`` had become a throwaway local).
    '''
    m = UpperCamelCase__
    # memo[n][k]: partitions of n using parts of size at most k + 1.
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1

    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
if __name__ == "__main__":
    import sys

    # Fix: the original called the undefined names `partition` and `n`; the
    # parsed value is now passed to the partition function defined above.
    if len(sys.argv) == 1:
        try:
            _snake_case = int(input('Enter a number: ').strip())
            print(lowerCAmelCase__(_snake_case))
        except ValueError:
            print('Please enter a number.')
    else:
        try:
            _snake_case = int(sys.argv[1])
            print(lowerCAmelCase__(_snake_case))
        except ValueError:
            print('Please pass a number.')
| 294 | import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
# Checkpoint flavour name.
# NOTE(review): the argparse default below interpolates `{MODEL}`, a name this
# obfuscated module never defines — presumably it was this constant; confirm.
UpperCamelCase__ = 'base_with_context'
def lowerCAmelCase_ ( __A, __A ) -> int:
    '''
    Port a T5X notes-encoder checkpoint (nested dict ``weights``) into the
    torch ``model``, layer by layer, and return the model.

    NOTE(review) — obfuscation damage, flagged rather than guessed at:
    * both parameters are named ``__A`` (duplicate parameter names are a
      SyntaxError), while the body reads ``weights`` and ``model``;
    * every converted tensor is bound to the throwaway local
      ``UpperCAmelCase__`` — the original ``model.<attr> = ...`` targets (and
      the ``ly_weight``/``attention_weights`` locals) are lost, so nothing is
      actually copied into the model.
    '''
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
    for lyr_num, lyr in enumerate(model.encoders ):
        UpperCAmelCase__ = weights[f"""layers_{lyr_num}"""]
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = ly_weight["attention"]
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
    return model
def lowerCAmelCase_ ( __A, __A ) -> Tuple:
    '''
    Port a T5X continuous-inputs-encoder checkpoint (nested dict ``weights``)
    into the torch ``model`` and return the model.

    NOTE(review) — obfuscation damage, flagged rather than guessed at:
    * both parameters are named ``__A`` (a SyntaxError), while the body reads
      ``weights`` and ``model``;
    * converted tensors are bound to the throwaway local ``UpperCAmelCase__``;
      the original assignment targets are lost, so nothing reaches the model.
    '''
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
    for lyr_num, lyr in enumerate(model.encoders ):
        UpperCAmelCase__ = weights[f"""layers_{lyr_num}"""]
        UpperCAmelCase__ = ly_weight["attention"]
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
    return model
def lowerCAmelCase_ ( __A, __A ) -> List[Any]:
    '''
    Port a T5X FiLM-decoder checkpoint (nested dict ``weights``) into the
    torch ``model`` and return the model.

    NOTE(review) — obfuscation damage, flagged rather than guessed at:
    * both parameters are named ``__A`` (a SyntaxError), while the body reads
      ``weights`` and ``model``;
    * converted tensors are bound to the throwaway local ``UpperCAmelCase__``;
      the original ``model.<attr> = ...`` targets (and the two attention
      sub-dict locals) are lost, so nothing is copied into the model.
    '''
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        UpperCAmelCase__ = weights[f"""layers_{lyr_num}"""]
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
        # Self-attention projection weights.
        UpperCAmelCase__ = ly_weight["self_attention"]
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        # Cross-attention projection weights.
        UpperCAmelCase__ = ly_weight["MultiHeadDotProductAttention_0"]
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
    return model
def lowerCAmelCase_ ( __A ) -> int:
    '''
    Conversion entry point: load a T5X music-spectrogram-diffusion checkpoint,
    build the diffusers components (notes/continuous encoders, FiLM decoder,
    DDPM scheduler, MelGAN), assemble a SpectrogramDiffusionPipeline and, when
    requested, save it to ``args.output_path``.

    NOTE(review) — obfuscation damage, flagged rather than guessed at:
    * every intermediate is bound to the same local ``UpperCAmelCase__``, each
      assignment overwriting the last, while later lines read the original
      names (``synth_model``, ``ta_checkpoint``, ``pipe``) which are undefined;
    * the body reads the module-level ``args`` rather than its parameter
      ``__A`` — confirm which was intended.
    '''
    UpperCAmelCase__ = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    UpperCAmelCase__ = jnp.tree_util.tree_map(onp.array, __A )
    # Gin overrides applied on top of the training config.
    UpperCAmelCase__ = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    UpperCAmelCase__ = os.path.join(args.checkpoint_path, "..", "config.gin" )
    UpperCAmelCase__ = inference.parse_training_gin_file(__A, __A )
    UpperCAmelCase__ = inference.InferenceModel(args.checkpoint_path, __A )
    UpperCAmelCase__ = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large" )
    UpperCAmelCase__ = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    UpperCAmelCase__ = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    UpperCAmelCase__ = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    # Copy the T5X weights into each freshly constructed torch module.
    UpperCAmelCase__ = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], __A )
    UpperCAmelCase__ = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], __A )
    UpperCAmelCase__ = load_decoder(ta_checkpoint["target"]["decoder"], __A )
    UpperCAmelCase__ = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
    UpperCAmelCase__ = SpectrogramDiffusionPipeline(
        notes_encoder=__A, continuous_encoder=__A, decoder=__A, scheduler=__A, melgan=__A, )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    # Fixes: the obfuscated original bound the parser and the parsed args to
    # one reused name and then referenced the undefined `parser`, `MODEL` and
    # `main`; distinct names are restored and the conversion entry point
    # defined above (`lowerCAmelCase_`) is invoked. Defining module-level
    # `args` also satisfies the entry point's read of that global.
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument(
        '--checkpoint_path',
        default=f'''{UpperCamelCase__}/checkpoint_500000''',
        type=str,
        required=False,
        help='Path to the original jax model checkpoint.',
    )
    args = parser.parse_args()
    lowerCAmelCase_(args)
| 65 | 0 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def UpperCAmelCase__ (snake_case__ ):
    """
    Return True iff *snake_case__* is a torch.compile / TorchDynamo
    ``OptimizedModule``; always False before torch 2.0.

    Fix: the body tested the undefined placeholder ``__A``; the ``hasattr``
    guard probes ``torch`` for the ``_dynamo`` submodule and the isinstance
    check applies to the argument. (The original ``: int`` annotation was
    dropped — the argument is a module, not an int.)
    """
    if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(torch , """_dynamo""" ):
        return False
    return isinstance(snake_case__ , torch._dynamo.eval_frame.OptimizedModule )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : int = True ):
    """
    Unwrap a model from its distributed/compiled containers (DDP,
    DataParallel, DeepSpeedEngine, torch.compile) and optionally strip the
    fp32 forward wrapper, returning the innermost model.

    NOTE(review) — obfuscation damage, documented rather than guessed at:
    * both parameters are named ``snake_case__`` (duplicate parameter names
      are a SyntaxError), while the body reads ``model`` and
      ``keep_fpaa_wrapper``;
    * ``is_compiled_module(__A )`` passes an undefined placeholder;
    * several assignments go to the throwaway local ``_snake_case`` where
      write-backs to ``model`` / ``model.forward`` / the compiled wrapper were
      clearly intended, so the unwrapping result is lost.
    """
    _snake_case : Any = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    _snake_case : Optional[int] = is_compiled_module(__A )
    if is_compiled:
        # Remember the compiled wrapper and work on the eager module inside it.
        _snake_case : Any = model
        _snake_case : Optional[int] = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(__A , __A ):
        _snake_case : Optional[int] = model.module
    if not keep_fpaa_wrapper:
        _snake_case : int = getattr(__A , """forward""" )
        _snake_case : List[str] = model.__dict__.pop("""_original_forward""" , __A )
        if original_forward is not None:
            # Walk back through functools wrappers to the pre-autocast forward.
            while hasattr(__A , """__wrapped__""" ):
                _snake_case : Optional[Any] = forward.__wrapped__
                if forward == original_forward:
                    break
            _snake_case : int = forward
        if getattr(__A , """_converted_to_transformer_engine""" , __A ):
            convert_model(__A , to_transformer_engine=__A )
    if is_compiled:
        # Re-attach the unwrapped model to the compiled wrapper and return it.
        _snake_case : List[Any] = model
        _snake_case : Optional[int] = compiled_model
    return model
def UpperCAmelCase__ ():
    """Block until every process in the current PartialState reaches this point."""
    state = PartialState()
    state.wait_for_everyone()
def UpperCAmelCase__ (obj , f ):
    """
    Save *obj* to path/file *f* exactly once per node: via ``xm.save`` when
    running on TPU, otherwise via ``torch.save`` on the local main process only.

    Fix: both parameters shared the name ``snake_case__`` (a SyntaxError) and
    the body passed the undefined placeholder ``__A``; real argument names
    restored.
    """
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def UpperCAmelCase__ (**snake_case__ ):
    """
    Context manager that temporarily exports the given keyword arguments as
    upper-cased environment variables and removes them again on exit.

    Fix: the value was assigned to a throwaway local instead of
    ``os.environ[key.upper()]``, so the environment was never actually patched.
    """
    for key, value in snake_case__.items():
        os.environ[key.upper()] = str(value )
    yield
    # Undo the patch, tolerating variables the body may have deleted itself.
    for key in snake_case__:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def UpperCAmelCase__ (snake_case__ ):
    """
    Best-effort human-readable name for *snake_case__*: its ``__qualname__``,
    else ``__name__``, else the same attributes of its class, else ``str()``.

    Fix: the class-fallback rebind went to a throwaway local and every
    attribute probe used the undefined placeholder ``__A``. (The misleading
    ``: str`` annotation was dropped — any object is accepted.)
    """
    if not hasattr(snake_case__ , """__qualname__""" ) and not hasattr(snake_case__ , """__name__""" ):
        snake_case__ = getattr(snake_case__ , """__class__""" , snake_case__ )
    if hasattr(snake_case__ , """__qualname__""" ):
        return snake_case__.__qualname__
    if hasattr(snake_case__ , """__name__""" ):
        return snake_case__.__name__
    return str(snake_case__ )
def UpperCAmelCase__ (source , destination ):
    """
    Recursively merge dict *source* into dict *destination* in place (nested
    dicts are merged key-by-key; other values overwrite) and return
    *destination*.

    Fix: both parameters shared the name ``snake_case__`` (a SyntaxError), the
    dict check tested undefined placeholders, and the recursive call named an
    undefined function; recursion now goes through an inner helper so it does
    not depend on the repeatedly shadowed module-level name.
    """
    def _merge(src , dst ):
        for key, value in src.items():
            if isinstance(value , dict ):
                node = dst.setdefault(key , {} )
                _merge(value , node )
            else:
                dst[key] = value
        return dst

    return _merge(source , destination )
def UpperCAmelCase__ (snake_case__ = None ):
    """
    Return True when TCP ``localhost:<port>`` accepts a connection (i.e. the
    port is in use). Defaults to 29500, the torch.distributed default port,
    when no port is given.

    Fix: the default-port assignment went to a throwaway local and the
    connection attempt referenced the undefined name ``port``.
    """
    port = 29_500 if snake_case__ is None else snake_case__
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(("""localhost""", port) ) == 0
| 64 | import math
def lowerCAmelCase_ ( __A ) -> bool:
    '''
    Float-based perfect-square check: True when sqrt(x) * sqrt(x) compares
    equal to x (subject to floating-point rounding for very large inputs).

    Fix: the comparison was against the undefined name ``num`` instead of the
    parameter.
    '''
    return math.sqrt(__A ) * math.sqrt(__A ) == __A
def lowerCAmelCase_ ( __A ) -> bool:
    '''
    Exact perfect-square check via integer binary search over [0, n].

    Fix: the obfuscated original bound both search bounds to one throwaway
    name and compared against the undefined name ``n``; real locals restored.
    '''
    left = 0
    right = __A
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == __A:
            return True
        elif mid**2 > __A:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 65 | 0 |
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Detect whether we are running inside Google Colab (best effort: the probe
# helper itself may be missing on very old installs, hence the try/except).
_A : Any = False
try:
    _A : List[Any] = _is_package_available("""google.colab""")
except ModuleNotFoundError:
    pass
# Fix: the menu class below tests the name `in_colab`, which was never bound
# (obfuscation renamed it to `_A`); expose the flag under the expected name.
in_colab = _A
@input.register
class a__ :
    """
    Interactive terminal bullet menu: renders a list of choices and lets the
    user pick one with the arrow keys, digit keys, or Enter (Ctrl-C aborts).

    NOTE(review) — obfuscation damage in this class, documented rather than
    silently altered:
    * ``__init__`` declares two parameters both named ``_a`` (duplicate
      parameter names are a SyntaxError) and uses a mutable ``[]`` default;
    * handler bodies pass the placeholder ``__UpperCAmelCase``, which
      undergoes private-name mangling against this class and will not resolve;
    * results are bound to the throwaway local ``lowercase`` where attributes
      such as ``self.position``/``self.choices``/``self.arrow_char`` were
      clearly intended;
    * ``run`` reads ``in_colab``, but the module-level detection above binds
      ``_A`` — confirm the intended name.
    """

    def __init__( self , _a = None , _a = [] ):
        lowercase : Tuple = 0
        lowercase : Tuple = choices
        lowercase : Optional[int] = prompt
        # Windows terminals can't render the arrow glyph; fall back to "*".
        if sys.platform == "win32":
            lowercase : Optional[Any] = "*"
        else:
            lowercase : str = "➔ "

    # write_choice(index, end=""): print one choice, colorized off-Windows.
    def __magic_name__ ( self , _a , _a = "" ):
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , __UpperCAmelCase )
        else:
            forceWrite(self.choices[index] , __UpperCAmelCase )

    # print_choice(index): render one row, with the arrow on the current one.
    def __magic_name__ ( self , _a ):
        if index == self.position:
            forceWrite(f""" {self.arrow_char} """ )
            self.write_choice(__UpperCAmelCase )
        else:
            forceWrite(f""" {self.choices[index]}""" )
        reset_cursor()

    # move_direction(direction, num_spaces=1): move the highlight up or down,
    # redrawing the row it leaves and the row it lands on.
    def __magic_name__ ( self , _a , _a = 1 ):
        lowercase : Dict = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(__UpperCAmelCase )
        move_cursor(__UpperCAmelCase , direction.name )
        self.print_choice(self.position )

    @input.mark(KEYMAP["up"] )
    def __magic_name__ ( self ):
        self.move_direction(Direction.UP )

    @input.mark(KEYMAP["down"] )
    def __magic_name__ ( self ):
        self.move_direction(Direction.DOWN )

    @input.mark(KEYMAP["newline"] )
    def __magic_name__ ( self ):
        # Enter: move the cursor past the menu and report the selection.
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        return self.position

    @input.mark(KEYMAP["interrupt"] )
    def __magic_name__ ( self ):
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        raise KeyboardInterrupt

    # Digit key 0-9: jump the highlight straight to that index.
    @input.mark_multiple(*[KEYMAP[str(__UpperCAmelCase )] for number in range(10 )] )
    def __magic_name__ ( self ):
        lowercase : Union[str, Any] = int(chr(self.current_selection ) )
        lowercase : List[Any] = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , __UpperCAmelCase )
            else:
                return
        else:
            return

    # run(default_choice=0): draw the menu and block until a choice is made;
    # in Colab the selection is typed as a plain index instead.
    def __magic_name__ ( self , _a = 0 ):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , "\n" )
        if in_colab:
            forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
        else:
            forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
        lowercase : Union[str, Any] = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(__UpperCAmelCase )
            forceWrite("\n" )
        move_cursor(len(self.choices ) - self.position , "UP" )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        lowercase : List[str] = int(builtins.input() )
                    except ValueError:
                        lowercase : Any = default_choice
                else:
                    lowercase : List[Any] = self.handle_input()
                if choice is not None:
                    # Erase the menu, re-print the chosen entry, and return it.
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , "UP" )
                        clear_line()
                    self.write_choice(__UpperCAmelCase , "\n" )
                    return choice
| 202 | import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class A ( UpperCAmelCase_ ):
    """
    Output container for a scheduler step.

    NOTE(review): both fields were obfuscated to the same name
    ``__UpperCAmelCase`` — the second annotation overwrites the first, so the
    dataclass ends up with a single (defaulted) field; in the upstream source
    these are ``prev_sample`` and ``pred_original_sample``. Field names are
    additionally subject to private-name mangling against this class.
    """

    # Sample for the previous timestep produced by the step.
    __UpperCAmelCase : torch.FloatTensor
    # Predicted fully-denoised sample (x_0); None when not computed.
    __UpperCAmelCase : Optional[torch.FloatTensor] = None
def lowerCAmelCase_ ( __A, __A=0.999, __A="cosine", ) -> Tuple:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(__A ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__A ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCAmelCase__ = []
for i in range(__A ):
UpperCAmelCase__ = i / num_diffusion_timesteps
UpperCAmelCase__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__A ) / alpha_bar_fn(__A ), __A ) )
return torch.tensor(__A, dtype=torch.floataa )
class A ( UpperCAmelCase_ , UpperCAmelCase_ ):
@register_to_config
def __init__(self : List[str] , __UpperCAmelCase : int = 1_0_0_0 , __UpperCAmelCase : str = "fixed_small_log" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[float] = 1.0 , __UpperCAmelCase : str = "epsilon" , __UpperCAmelCase : str = "squaredcos_cap_v2" , ) -> Optional[int]:
"""simple docstring"""
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
UpperCAmelCase__ = betas_for_alpha_bar(__UpperCAmelCase )
UpperCAmelCase__ = 1.0 - self.betas
UpperCAmelCase__ = torch.cumprod(self.alphas , dim=0 )
UpperCAmelCase__ = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
UpperCAmelCase__ = 1.0
# setable values
UpperCAmelCase__ = None
UpperCAmelCase__ = torch.from_numpy(np.arange(0 , __UpperCAmelCase )[::-1].copy() )
UpperCAmelCase__ = variance_type
def lowercase_ (self : List[str] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Optional[int] = None ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def lowercase_ (self : int , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, torch.device] = None ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = num_inference_steps
UpperCAmelCase__ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
UpperCAmelCase__ = (np.arange(0 , __UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
UpperCAmelCase__ = torch.from_numpy(__UpperCAmelCase ).to(__UpperCAmelCase )
def lowercase_ (self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[str]=None ) -> Tuple:
"""simple docstring"""
if prev_timestep is None:
UpperCAmelCase__ = t - 1
UpperCAmelCase__ = self.alphas_cumprod[t]
UpperCAmelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCAmelCase__ = 1 - alpha_prod_t
UpperCAmelCase__ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCAmelCase__ = self.betas[t]
else:
UpperCAmelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
UpperCAmelCase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
UpperCAmelCase__ = torch.log(torch.clamp(__UpperCAmelCase , min=1E-20 ) )
UpperCAmelCase__ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
UpperCAmelCase__ = variance.log()
UpperCAmelCase__ = beta.log()
UpperCAmelCase__ = (predicted_variance + 1) / 2
UpperCAmelCase__ = frac * max_log + (1 - frac) * min_log
return variance
def lowercase_ (self : Optional[int] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : int , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
"""simple docstring"""
UpperCAmelCase__ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
UpperCAmelCase__ , UpperCAmelCase__ = torch.split(__UpperCAmelCase , sample.shape[1] , dim=1 )
else:
UpperCAmelCase__ = None
# 1. compute alphas, betas
if prev_timestep is None:
UpperCAmelCase__ = t - 1
UpperCAmelCase__ = self.alphas_cumprod[t]
UpperCAmelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCAmelCase__ = 1 - alpha_prod_t
UpperCAmelCase__ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCAmelCase__ = self.betas[t]
UpperCAmelCase__ = self.alphas[t]
else:
UpperCAmelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev
UpperCAmelCase__ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ = torch.clamp(
__UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
UpperCAmelCase__ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCAmelCase__ = 0
if t > 0:
UpperCAmelCase__ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=__UpperCAmelCase , device=model_output.device )
UpperCAmelCase__ = self._get_variance(
__UpperCAmelCase , predicted_variance=__UpperCAmelCase , prev_timestep=__UpperCAmelCase , )
if self.variance_type == "fixed_small_log":
UpperCAmelCase__ = variance
elif self.variance_type == "learned_range":
UpperCAmelCase__ = (0.5 * variance).exp()
else:
raise ValueError(
f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
" for the UnCLIPScheduler." )
UpperCAmelCase__ = variance * variance_noise
UpperCAmelCase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__UpperCAmelCase , pred_original_sample=__UpperCAmelCase )
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.IntTensor , ) -> torch.FloatTensor:
"""simple docstring"""
UpperCAmelCase__ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
UpperCAmelCase__ = timesteps.to(original_samples.device )
UpperCAmelCase__ = alphas_cumprod[timesteps] ** 0.5
UpperCAmelCase__ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
UpperCAmelCase__ = sqrt_alpha_prod.unsqueeze(-1 )
UpperCAmelCase__ = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCAmelCase__ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
UpperCAmelCase__ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
UpperCAmelCase__ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 65 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Return the total number of trainable (requires_grad) parameters of `model`.

    Restored name: the callback below invokes `count_trainable_parameters(...)`,
    while the mangled def was `snake_case` with all internal names destroyed.
    """
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


# Backward-compat alias for the machine-generated name.
snake_case = count_trainable_parameters
lowerCamelCase_ = logging.getLogger(__name__)
def snake_case(output_dir, metric):
    """Build a ModelCheckpoint that keeps the top-3 checkpoints by `val_{metric}`.

    Args:
        output_dir: directory checkpoints are written to.
        metric: one of "rouge2", "bleu" or "em".

    Raises:
        NotImplementedError: for any other metric.
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1
    )
    return checkpoint_callback
def snake_case(metric, patience):
    """Build an EarlyStopping callback on `val_{metric}` (min for losses, max otherwise).

    Restored `patience=patience, verbose=True` from the mangled `__A` placeholders
    (matches the upstream utils_rag callbacks helper).
    """
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class UpperCamelCase_(pl.Callback):
    """Lightning callback that logs learning rates, parameter counts and
    per-`type_path` metric/generation files for seq2seq training.

    Restored hook names: the methods were all mangled to `_SCREAMING_SNAKE_CASE`,
    which makes the callback inert (Lightning dispatches by method name) and
    breaks the internal `self._write_logs(...)` call at test end.
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every optimizer param group.
        lrs = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        """Write callback metrics (and optionally generations) for `type_path` to disk."""
        logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"""{key}: {val:.6f}\n"""
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 268 | import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class A(unittest.TestCase):
    """Multi-GPU launcher tests: each test spawns `torchrun` on an accelerate
    test script.

    Restored method names: the mangled `lowercase_` names meant unittest never
    ran `setUp` (so the `self.*_file_path` attributes the tests read were never
    set) and never discovered any `test_*` method.
    """

    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(f"""Command: {cmd}""")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # Re-launches this very file so the __main__ block below runs per process.
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Per-process check for Accelerator.pad_across_processes: each rank builds a
    # tensor of a different length and verifies padding at the end / start.
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 1_0)
    tensor = torch.randint(0, 1_0, shape).to(accelerator.device)

    error_msg = ""

    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 65 | 0 |
"""simple docstring"""
def a__(graph: dict) -> bool:
    """Return True if the directed `graph` (adjacency dict) contains a cycle.

    Restored parameter/local names: the mangled body assigned placeholders while
    the generator expression below reads `visited`, `rec_stk` and `graph`.
    """
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )
def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Return True if a cycle is reachable from `vertex` (back edge into `rec_stk`).

    Restored name: both the sibling entry point and the recursive call at this
    function's own body invoke `depth_first_search`, while the def was mangled.
    """
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            # Back edge into the current recursion stack: cycle found.
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 153 | import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """In-place: drop fairseq bookkeeping keys that have no HF counterpart.

    Restored name: the converter below calls `remove_ignore_keys_(...)`.
    Missing keys are ignored via `dict.pop(k, None)`.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


# Backward-compat alias for the machine-generated name.
lowerCAmelCase_ = remove_ignore_keys_
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer whose weight tensor is the embedding weight.

    Restored name: the converter below calls `make_linear_from_emb(...)`.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Tie the projection to the embedding by sharing the underlying data.
    lin_layer.weight.data = emb.weight.data
    return lin_layer


# Backward-compat alias for the machine-generated name.
lowerCAmelCase_ = make_linear_from_emb
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False
):
    """Load a fairseq mBART checkpoint and convert it to MBartForConditionalGeneration.

    Restored name and parameters: the __main__ block calls
    `convert_fairseq_mbart_checkpoint_from_disk(..., hf_config_path=...,
    finetuned=..., mbart_aa=...)`, while the def was mangled with duplicate
    parameter names (SyntaxError).
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_aa and finetuned:
        # mBART-50 fine-tuned checkpoints use ReLU activations.
        mbart_config.activation_function = "relu"

    # The shared embedding is tied to the decoder embedding in the checkpoint.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


# Backward-compat alias for the machine-generated name.
lowerCAmelCase_ = convert_fairseq_mbart_checkpoint_from_disk
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    # NOTE: argparse stores `--mbart_50` as `args.mbart_50` (the mangled code read
    # a nonexistent `args.mbart_aa`).
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 65 | 0 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return all subsets of `nums` (as lists, in DFS order) whose sum equals `max_sum`.

    Restored name/parameters: the script below calls
    `generate_sum_of_subsets_soln(nums, max_sum)` and the mangled def declared
    two parameters both named `A` (SyntaxError).
    """
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result
def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """DFS over the subset state-space, appending subsets that sum to `max_sum` to `result`.

    Prunes when the partial sum overshoots, or when even adding everything left
    (`remaining_nums_sum`) could not reach `max_sum`. Restored name: both the
    entry point and the recursive call below invoke `create_state_space_tree`.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )
# Demo run: all subsets of `nums` summing to `max_sum`.
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
a_ = result  # backward-compat alias for the machine-generated name
print(*result)
| 277 | from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# Runtime-checked dependencies (usually the ones in `install_requires`).
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
    'python',
    'tqdm',
    'regex',
    'requests',
    'packaging',
    'filelock',
    'numpy',
    'tokenizers',
    'huggingface-hub',
    'safetensors',
    'accelerate',
    'pyyaml',
]
UpperCamelCase__ = pkgs_to_check_at_runtime  # backward-compat alias for the machine-generated name

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowerCAmelCase_(pkg, hint=None):
    """Check the installed version of `pkg` against the pinned range in `deps`.

    Fixes: the mangled signature declared both parameters as `__A` (SyntaxError);
    `hint` is an optional message appended to the version-mismatch error.
    """
    require_version(deps[pkg], hint)
| 65 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def __lowercase(method):
    """Decorator: run the accelerate `_hf_hook.pre_forward` hook (if attached)
    before calling `method` on `self`.

    Returns `method` unchanged when accelerate is missing or older than 0.17.0.
    Restored names: the mangled body assigned placeholders while the return
    statements and the wrapper read `method`.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse('0.17.0'):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
| 257 | import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# `main` below logs through the name `logger`, which the mangling had rebound.
logger = logging.getLogger(__name__)
UpperCamelCase__ = logger  # backward-compat alias for the machine-generated name
def main():
    """CLI entry point: tokenize a raw text file line-by-line and pickle the id
    sequences (distillation preprocessing).

    Restored name (`__main__` guard calls `main()`) and all locals; also restored
    `np.uint16`/`np.int32` from the nonexistent `np.uintaa`/`np.intaa`.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"""Loading Tokenizer ({args.tokenizer_name})""")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        # NOTE(review): `GPTaTokenizer` is the file's (mangled) import binding for
        # transformers' GPT2Tokenizer — fix the import line alongside this.
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"""Loading text from {args.file_path}""")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"""{len(data)} examples to process.""")
    rslt = []
    count = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"""{bos} {text.strip()} {sep}"""
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f"""{count} examples processed. - {(end-start):.2f}s/{interval}expl""")
            start = time.time()

    logger.info("Finished binarization")
    logger.info(f"""{len(rslt)} examples processed.""")
    dp_file = f"""{args.dump_file}.{args.tokenizer_name}.pickle"""
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]  # ids fit in 16 bits
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"""Dump to {dp_file}""")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
| 65 | 0 |
"""simple docstring"""
def heaps(arr: list) -> list:
    """Return all permutations of `arr` as tuples, via iterative Heap's algorithm.

    Restored name: the __main__ guard calls `heaps(arr)`; the mangled body also
    lost the swap targets (both sides of each swap had become `snake_case`).
    Note: `arr` is permuted in place while generating.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                # Even i swaps with position 0, odd i swaps with position c[i].
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    # Read a comma-separated list of ints and print all of its permutations.
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    print(heaps(arr))
| 203 | from manim import *
class A ( UpperCAmelCase_ ):
    """Manim scene animating CPU/GPU/disk traffic during big-model inference.

    NOTE(review): this block was machine-mangled — every local was collapsed to
    ``UpperCAmelCase__`` and most positional arguments (directions such as
    UP/RIGHT/DOWN, colors, animation targets) were replaced by the placeholder
    ``__UpperCAmelCase``, so the scene cannot run as-is. The originals are not
    recoverable from this file; code left byte-identical pending restoration
    from the upstream animation script.
    """

    def lowercase_ (self : Union[str, Any] ) -> List[str]:
        """Build the memory diagram and play the layer-by-layer offload animation
        (originally ``Scene.construct``)."""
        # Base rectangles for memory cells (solid, faded, and meta variants).
        UpperCAmelCase__ = Rectangle(height=0.5 , width=0.5 )
        UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        UpperCAmelCase__ = Rectangle(height=0.25 , width=0.25 )
        # CPU block: two columns of six cells plus a label.
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("CPU" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__UpperCAmelCase )
        # GPU block: four cells plus a label.
        UpperCAmelCase__ = [mem.copy() for i in range(4 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("GPU" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(__UpperCAmelCase )
        # Model block: six cells representing the model's layers.
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("Model" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(__UpperCAmelCase )
        # Filled overlays: one per layer rect, mirrored into the CPU column.
        UpperCAmelCase__ = []
        UpperCAmelCase__ = []
        for i, rect in enumerate(__UpperCAmelCase ):
            UpperCAmelCase__ = fill.copy().set_fill(__UpperCAmelCase , opacity=0.8 )
            target.move_to(__UpperCAmelCase )
            model_arr.append(__UpperCAmelCase )
            UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase )
        # Disk block built from the small "meta" cells.
        UpperCAmelCase__ = [meta_mem.copy() for i in range(6 )]
        UpperCAmelCase__ = [meta_mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("Disk" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        disk.move_to([-4, -1.25, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        # Legend / key box.
        UpperCAmelCase__ = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        UpperCAmelCase__ = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase__ = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__UpperCAmelCase )
        # Intro caption.
        UpperCAmelCase__ = MarkupText(
            f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase ) )
        # Input token that travels through the model layers.
        UpperCAmelCase__ = Square(0.3 )
        input.set_fill(__UpperCAmelCase , opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] , __UpperCAmelCase , buff=0.5 )
        self.play(Write(__UpperCAmelCase ) )
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=__UpperCAmelCase , buff=0.02 )
        self.play(MoveToTarget(__UpperCAmelCase ) )
        self.play(FadeOut(__UpperCAmelCase ) )
        UpperCAmelCase__ = Arrow(start=__UpperCAmelCase , end=__UpperCAmelCase , color=__UpperCAmelCase , buff=0.5 )
        a.next_to(model_arr[0].get_left() , __UpperCAmelCase , buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        UpperCAmelCase__ = MarkupText(
            f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) )
        # Shared kwargs for the Circumscribe highlight animations.
        UpperCAmelCase__ = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(__UpperCAmelCase ) , Circumscribe(model_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        UpperCAmelCase__ = a.copy()
        # Walk the input across all six layers, swapping weights CPU<->GPU.
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 , __UpperCAmelCase , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            UpperCAmelCase__ = AnimationGroup(
                FadeOut(__UpperCAmelCase , run_time=0.5 ) , MoveToTarget(__UpperCAmelCase , run_time=0.5 ) , FadeIn(__UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
            self.play(__UpperCAmelCase )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    UpperCAmelCase__ = 0.7
                self.play(
                    Circumscribe(model_arr[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
            else:
                # Last layer: move its weights back and slide the input out.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        UpperCAmelCase__ = a_c
        UpperCAmelCase__ = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
        self.play(
            FadeOut(__UpperCAmelCase ) , FadeOut(__UpperCAmelCase , run_time=0.5 ) , )
        # Closing caption.
        UpperCAmelCase__ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) , MoveToTarget(__UpperCAmelCase ) )
        self.wait()
| 65 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """Project Euler 75: count wire lengths <= `limit` that bend into exactly one
    integer-sided right triangle.

    Uses Euclid's formula: for coprime m > n of opposite parity, the primitive
    perimeter is 2*m*(m+n); every multiple of it is also a valid perimeter.
    Restored name: the __main__ guard prints `solution()`; also restored
    `defaultdict(int)` and moved `euclid_m += 1` back to the while-loop level.
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        # n must have opposite parity to m to yield a primitive triple.
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


# Backward-compat alias for the machine-generated name.
__lowercase = solution
if __name__ == "__main__":
    # Prints e.g. "solution() = <count>" for the default 1,500,000 limit.
    print(f'{solution() = }')
| 264 | from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined by 2-D control points via Bernstein basis polynomials.

    Restored names: internal calls reference `self.basis_function` and
    `self.bezier_curve_function`, and the __main__ demo instantiates
    `BezierCurve`, while the mangled class/methods were `A`/`lowercase_`.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Return the Bernstein basis values at parameter `t` (must sum to 1)."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t ** i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Return the (x, y) point on the curve at parameter `t` in [0, 1]."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plot the curve and its control points with matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


# Backward-compat alias for the machine-generated name.
A = BezierCurve
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo plots; each call opens a matplotlib window.
    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 65 | 0 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both assignments below were mangled to the same name, so the
# pretrained-config map immediately clobbers the module logger; originally these
# were two distinct module-level constants (logger and an archive map).
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    """Configuration for an Autoformer time-series forecasting model.

    Holds the time-series feature layout (lags, static/dynamic features,
    categorical cardinalities) plus the encoder/decoder Transformer
    architecture and the Autoformer-specific decomposition settings.
    """

    model_type = "autoformer"
    # Map the generic config attribute names onto this model's naming scheme.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # Time-series specific configuration.
        self.prediction_length = prediction_length
        # Default the conditioning window to the prediction horizon.
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic: embedding size grows with cardinality, capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration.
        # Model input = lagged values for every lag plus the extra features.
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer-specific decomposition parameters.
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # Number of extra per-time-step features appended to the lagged values.
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class A(unittest.TestCase):
    """Integration tests for the Tatoeba -> Marian checkpoint converter."""

    @cached_property
    def resolver(self):
        # Build the converter lazily, writing into a throw-away directory.
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        # Smoke test: converting one language pair must not raise.
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        # dry_run=True builds the card metadata without writing files.
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint to a PyTorch checkpoint.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON config describing the pre-trained T5 architecture.
        pytorch_dump_path: where to write the converted PyTorch model.
    """
    # Initialise an (untrained) PyTorch model from the architecture config.
    config = TaConfig.from_json_file(config_file)
    print(F"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Command-line entry point: parse the three required paths and convert.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Constant sets used by the bounded operations below.
    one = np.ones(75)
    zero = np.zeros((75,))

    # Compute the different operations using inbuilt functions.
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    """Builds a tiny BlenderbotSmall config plus random inputs for the fast tests."""

    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        # Force every sequence to end with EOS so generation-style code paths work.
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check that decoding with a KV-cache matches decoding without it."""
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        # Work on a single example to keep the check cheap.
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default attention/head masks for a BlenderbotSmall forward pass.

    Any mask passed explicitly is kept; missing ones are derived from the
    token ids (padding positions masked out) or set to all-ones.
    """
    if attention_mask is None:
        # Attend to every non-padding token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The first decoder token (decoder_start) is always attended to.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model) tests for the TF BlenderbotSmall architecture."""

    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbotSmall90MIntegrationTests(unittest.TestCase):
    """Slow integration test: real generation with the 90M checkpoint."""

    src_text = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    model_name = 'facebook/blenderbot_small-90M'

    @cached_property
    def tokenizer(self):
        # The 90M checkpoint re-uses the blenderbot-90M tokenizer files.
        return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""")

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="""tf""")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        # Beam search over this checkpoint is known to land on one of these.
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
from __future__ import annotations
from collections import deque
class A:
    """Aho-Corasick automaton for multi-pattern string search.

    The automaton is stored as an adjacency list (``adlist``); each node
    holds its character ``value``, its child states, a failure link and the
    keywords that end at (or fail into) that state.
    """

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        # State 0 is the root; its fail state points at itself.
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        """Return the child of ``current_state`` labelled ``char``, if any."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        """Insert ``keyword`` into the trie, creating states as needed."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        """BFS over the trie to compute failure links and merged outputs."""
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            # Depth-1 states always fail back to the root.
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                # Walk failure links until some state has a matching child.
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                # A match at the fail state is also a match at this state.
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return ``{keyword: [start indices]}`` for all matches in ``string``.

        >>> A(["what", "hat", "ver", "er"]).search_in("whatever, err ... , wherever")
        {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}
        """
        result: dict[str, list[int]] = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    # Match ends at i; convert to the keyword's start index.
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
    # Run the embedded doctests when the module is executed directly.
    import doctest

    doctest.testmod()
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    """Fast CPU tests for the LDM3D (joint RGB + depth) Stable Diffusion pipeline."""

    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        # Tiny randomly-initialised components so the pipeline runs in seconds.
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""),
            up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00_085,
            beta_end=0.012,
            beta_schedule="""scaled_linear""",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        # 6 channels in/out: 3 RGB + 3 depth.
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("""mps"""):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262])
        expected_slice_depth = np.array([103.46_727, 85.812_004, 87.849_236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1E-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1E-2

    def test_stable_diffusion_prompt_embeds(self):
        # Passing prompt_embeds directly must match passing the text prompt.
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_a, depth_slice_a = output.rgb, output.depth
        rgb_slice_a = rgb_slice_a[0, -3:, -3:, -1]
        depth_slice_a = depth_slice_a[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("""prompt""")]
        text_inputs = ldmad_pipe.tokenizer(
            prompt,
            padding="""max_length""",
            max_length=ldmad_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="""pt""",
        )
        text_inputs = text_inputs["""input_ids"""].to(torch_device)
        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs["""prompt_embeds"""] = prompt_embeds
        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_a2, depth_slice_a2 = output.rgb, output.depth
        rgb_slice_a2 = rgb_slice_a2[0, -3:, -3:, -1]
        depth_slice_a2 = depth_slice_a2[0, -3:, -1]

        assert np.abs(rgb_slice_a.flatten() - rgb_slice_a2.flatten()).max() < 1E-4
        assert np.abs(depth_slice_a.flatten() - depth_slice_a2.flatten()).max() < 1E-4

    def test_stable_diffusion_negative_prompt(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = """french fries"""
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217])
        expected_slice_depth = np.array([107.84_738, 84.62_802, 89.962_135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1E-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    """GPU integration tests against the released Intel/ldm3d checkpoint."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        # Fixed latents so the output slices are reproducible across runs.
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def test_ldmad_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        # NOTE(review): the depth slice is taken from `rgb` (3x3 = 9 values,
        # matching expected_slice_depth below) — looks intentional, confirm.
        depth_slice = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706])
        expected_slice_depth = np.array(
            [0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3E-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3E-3
@require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self: Optional[int], a_: int, a_: Optional[Any]="cpu", a_: Optional[int]=torch.floataa, a_: Optional[int]=0 ):
'''simple docstring'''
_snake_case : Any = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
_snake_case : Any = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 64, 64) )
_snake_case : Union[str, Any] = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase, dtype=__UpperCAmelCase )
_snake_case : Union[str, Any] = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : List[str] = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_snake_case : Any = self.get_inputs(__UpperCAmelCase )
_snake_case : Optional[int] = ldmad_pipe(**__UpperCAmelCase )
_snake_case , _snake_case : Dict = output.rgb, output.depth
_snake_case : List[str] = 0.495_586
_snake_case : Tuple = 0.33_795_515
_snake_case : Optional[Any] = 112.48_518
_snake_case : Optional[int] = 98.489_746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(__UpperCAmelCase )
ldmad_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_snake_case : str = self.get_inputs(__UpperCAmelCase )
_snake_case : List[Any] = ldmad_pipe(**__UpperCAmelCase )
_snake_case , _snake_case : Dict = output.rgb, output.depth
_snake_case : int = 0.4_194_127
_snake_case : List[Any] = 0.35_375_586
_snake_case : Tuple = 0.5_638_502
_snake_case : Dict = 0.34_686_103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : int = ['input_values', 'attention_mask']
def __init__(self : Any , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 1_6_0_0_0 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : bool = False , __UpperCAmelCase : int = 8_0 , __UpperCAmelCase : int = 1_6 , __UpperCAmelCase : int = 6_4 , __UpperCAmelCase : str = "hann_window" , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : float = 8_0 , __UpperCAmelCase : float = 7_6_0_0 , __UpperCAmelCase : float = 1E-10 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : bool = True , **__UpperCAmelCase : Any , ) -> str:
"""simple docstring"""
super().__init__(feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , **__UpperCAmelCase )
UpperCAmelCase__ = do_normalize
UpperCAmelCase__ = return_attention_mask
UpperCAmelCase__ = num_mel_bins
UpperCAmelCase__ = hop_length
UpperCAmelCase__ = win_length
UpperCAmelCase__ = win_function
UpperCAmelCase__ = frame_signal_scale
UpperCAmelCase__ = fmin
UpperCAmelCase__ = fmax
UpperCAmelCase__ = mel_floor
UpperCAmelCase__ = reduction_factor
UpperCAmelCase__ = win_length * sampling_rate // 1_0_0_0
UpperCAmelCase__ = hop_length * sampling_rate // 1_0_0_0
UpperCAmelCase__ = optimal_fft_length(self.sample_size )
UpperCAmelCase__ = (self.n_fft // 2) + 1
UpperCAmelCase__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCAmelCase )
UpperCAmelCase__ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
if frame_signal_scale != 1.0:
warnings.warn(
"The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , __UpperCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
"The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , __UpperCAmelCase , )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def lowercase_ (__UpperCAmelCase : List[np.ndarray] , __UpperCAmelCase : List[np.ndarray] , __UpperCAmelCase : float = 0.0 ) -> List[np.ndarray]:
        """Normalise each waveform to zero mean / unit variance, computing the
        statistics only over the unpadded region given by `attention_mask`.

        NOTE(review): the parameters are mangled to one shared name, so
        `attention_mask`, `input_values`, `padding_value` and the accumulator
        `normed_input_values` referenced below are undefined here — scrambled
        code; compare with the Wav2Vec2 original named in the comment above.
        """
        if attention_mask is not None:
            UpperCAmelCase__ = np.array(__UpperCAmelCase , np.intaa )
            UpperCAmelCase__ = []
            for vector, length in zip(__UpperCAmelCase , attention_mask.sum(-1 ) ):
                # 1e-7 guards against division by zero on silent slices.
                UpperCAmelCase__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    # Re-apply the padding value beyond the valid length.
                    UpperCAmelCase__ = padding_value
                normed_input_values.append(__UpperCAmelCase )
        else:
            UpperCAmelCase__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values
    def lowercase_ (self : Optional[int] , __UpperCAmelCase : np.ndarray , ) -> np.ndarray:
        """Compute a log10 mel spectrogram for one waveform and return it
        transposed to shape (frames, num_mel_bins).

        NOTE(review): `log_mel_spec` in the return is undefined — the
        `UpperCAmelCase__` binding above it is a scrambled stand-in for it.
        """
        UpperCAmelCase__ = spectrogram(
            __UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
        return log_mel_spec.T
    def __call__(self : Any , __UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , __UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , __UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : Optional[int] = None , **__UpperCAmelCase : str , ) -> BatchFeature:
        """Featurise raw `audio` (encoder input values) and/or `audio_target`
        (decoder target spectrograms) into a `BatchFeature`.

        NOTE(review): parameter names are mangled; `audio`, `audio_target`,
        `sampling_rate`, `inputs`, `inputs_target`, `decoder_attention_mask`
        below are undefined in this scrambled form.  `self._process_audio` is
        also not the name the helper is defined under here (`lowercase_`).
        """
        # At least one of the two audio inputs must be supplied.
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values." )
        # Guard against featurising audio recorded at the wrong sample rate.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        if audio is not None:
            UpperCAmelCase__ = self._process_audio(
                __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , )
        else:
            UpperCAmelCase__ = None
        if audio_target is not None:
            # is_target=True: targets become mel spectrograms, not waveforms.
            UpperCAmelCase__ = self._process_audio(
                __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , )
            if inputs is None:
                return inputs_target
            else:
                # Merge target features into the encoder inputs as labels.
                UpperCAmelCase__ = inputs_target["input_values"]
                UpperCAmelCase__ = inputs_target.get("attention_mask" )
                if decoder_attention_mask is not None:
                    UpperCAmelCase__ = decoder_attention_mask
        return inputs
    def lowercase_ (self : Optional[int] , __UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCAmelCase : bool = False , __UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , **__UpperCAmelCase : Any , ) -> BatchFeature:
        """Normalise, (optionally) mel-transform, pad and tensorise a batch of
        waveforms.  With `is_target=True` the input is converted to mel
        spectrograms before padding.

        NOTE(review): parameters are mangled; `speech`, `is_target`,
        `padded_inputs`, `feature_size_hack` etc. below are undefined in this
        scrambled form — compare with the upstream `_process_audio`.
        """
        # A 2-D ndarray means an already-batched mono input.
        UpperCAmelCase__ = isinstance(__UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        UpperCAmelCase__ = is_batched_numpy or (
            isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            UpperCAmelCase__ = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for speech in speech]
        elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ):
            UpperCAmelCase__ = np.asarray(__UpperCAmelCase , dtype=np.floataa )
        elif isinstance(__UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
            UpperCAmelCase__ = speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            UpperCAmelCase__ = [speech]
        # needed to make pad() work on spectrogram inputs
        UpperCAmelCase__ = self.feature_size
        # convert into correct format for padding
        if is_target:
            UpperCAmelCase__ = [self._extract_mel_features(__UpperCAmelCase ) for waveform in speech]
            UpperCAmelCase__ = BatchFeature({"input_values": features} )
            # Spectrogram frames have num_mel_bins features, not feature_size.
            UpperCAmelCase__ = self.num_mel_bins
        else:
            UpperCAmelCase__ = BatchFeature({"input_values": speech} )
        UpperCAmelCase__ = self.pad(
            __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
        # Restore feature_size after the temporary override above.
        UpperCAmelCase__ = feature_size_hack
        # convert input values to correct format
        UpperCAmelCase__ = padded_inputs["input_values"]
        if not isinstance(input_values[0] , np.ndarray ):
            UpperCAmelCase__ = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for array in input_values]
        elif (
            not isinstance(__UpperCAmelCase , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.floataa )
        ):
            UpperCAmelCase__ = [array.astype(np.floataa ) for array in input_values]
        elif isinstance(__UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
            UpperCAmelCase__ = input_values.astype(np.floataa )
        # convert attention_mask to correct format
        UpperCAmelCase__ = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            UpperCAmelCase__ = [np.asarray(__UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            # Only use the mask when padding actually occurred.
            UpperCAmelCase__ = (
                attention_mask
                if self._get_padding_strategies(__UpperCAmelCase , max_length=__UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            UpperCAmelCase__ = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"] , attention_mask=__UpperCAmelCase , padding_value=self.padding_value )
        if return_tensors is not None:
            UpperCAmelCase__ = padded_inputs.convert_to_tensors(__UpperCAmelCase )
        return padded_inputs
    def lowercase_ (self : Tuple ) -> Dict[str, Any]:
        """Serialise the extractor config, dropping derived attributes that are
        recomputed in `__init__` (window, mel filters, FFT sizes).

        NOTE(review): `output` and `names` below are undefined in this
        scrambled form — the `UpperCAmelCase__` bindings stand in for them.
        """
        UpperCAmelCase__ = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        UpperCAmelCase__ = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
| 65 | 0 |
"""simple docstring"""
def __magic_name__ ( __snake_case : Any ) -> list:
if any(not isinstance(__A , __A ) or x < 0 for x in sequence ):
raise TypeError("Sequence must be list of non-negative integers" )
for _ in range(len(__A ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(__A , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 202 | from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class A ( UpperCAmelCase_ ):
    """Model output holding a projected text embedding plus the usual
    transformer outputs.

    NOTE(review): all four fields are mangled to the same name
    `__UpperCAmelCase`, so only the last annotation survives on the dataclass;
    upstream these are projection_state / last_hidden_state / hidden_states /
    attentions — confirm before use.
    """

    # projection_state
    __UpperCAmelCase : Optional[torch.FloatTensor] = None
    # last_hidden_state
    __UpperCAmelCase : torch.FloatTensor = None
    # hidden_states
    __UpperCAmelCase : Optional[Tuple[torch.FloatTensor]] = None
    # attentions
    __UpperCAmelCase : Optional[Tuple[torch.FloatTensor]] = None
class A ( UpperCAmelCase_ ):
    """Configuration for the Roberta-series text encoder with an added
    projection head.

    NOTE(review): the `UpperCAmelCase__ = ...` lines bind locals, not
    `self.project_dim` etc., and the parameter names in the body are undefined
    (all parameters are mangled to `__UpperCAmelCase`) — scrambled code.
    """

    def __init__(self : Union[str, Any] , __UpperCAmelCase : Tuple=1 , __UpperCAmelCase : str=0 , __UpperCAmelCase : str=2 , __UpperCAmelCase : Union[str, Any]=5_1_2 , __UpperCAmelCase : List[str]="cls" , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : str=True , **__UpperCAmelCase : str , ) -> int:
        """Store projection/pooling options on top of the base config."""
        super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
        UpperCAmelCase__ = project_dim
        UpperCAmelCase__ = pooler_fn
        UpperCAmelCase__ = learn_encoder
        UpperCAmelCase__ = use_attention_mask
class A ( UpperCAmelCase_ ):
    """XLM-Roberta encoder plus a linear projection of the hidden state,
    used as a text encoder for diffusion pipelines.

    NOTE(review): `RobertaSeriesConfig` in `config_class` is not defined under
    that name in this file (the config class above is named `A`), and the
    `UpperCAmelCase__` bindings in the methods are locals rather than
    `self.*` / the intended result names — scrambled code; verify upstream.
    """

    # Keys ignored / unexpected when loading checkpoints.
    __UpperCAmelCase : Tuple = [r'pooler', r'logit_scale']
    __UpperCAmelCase : int = [r'position_ids', r'predictions.decoder.bias']
    __UpperCAmelCase : Any = 'roberta'
    __UpperCAmelCase : List[str] = RobertaSeriesConfig

    def __init__(self : Tuple , __UpperCAmelCase : Optional[int] ) -> int:
        """Build the base model, projection layer(s) and optional pre-LN."""
        super().__init__(__UpperCAmelCase )
        UpperCAmelCase__ = XLMRobertaModel(__UpperCAmelCase )
        UpperCAmelCase__ = nn.Linear(config.hidden_size , config.project_dim )
        UpperCAmelCase__ = getattr(__UpperCAmelCase , "has_pre_transformation" , __UpperCAmelCase )
        if self.has_pre_transformation:
            # Extra projection + LayerNorm applied to the penultimate layer.
            UpperCAmelCase__ = nn.Linear(config.hidden_size , config.project_dim )
            UpperCAmelCase__ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()

    def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , ) -> Optional[int]:
        """Forward pass: encode text and project either the penultimate layer
        (when `has_pre_transformation`) or the last hidden state."""
        UpperCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
        UpperCAmelCase__ = self.base_model(
            input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , position_ids=__UpperCAmelCase , head_mask=__UpperCAmelCase , inputs_embeds=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_attentions=__UpperCAmelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__UpperCAmelCase , )
        if self.has_pre_transformation:
            # [-2] is the penultimate layer's hidden states.
            UpperCAmelCase__ = outputs["hidden_states"][-2]
            UpperCAmelCase__ = self.pre_LN(__UpperCAmelCase )
            UpperCAmelCase__ = self.transformation_pre(__UpperCAmelCase )
            return TransformationModelOutput(
                projection_state=__UpperCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            UpperCAmelCase__ = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=__UpperCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 65 | 0 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowerCamelCase_ = get_logger(__name__)
lowerCamelCase_ = r'''\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'''
class UpperCamelCase_ :
    """Abstract base for logits processors: subclasses transform `scores`
    given `input_ids`; calling the base class itself is an error."""

    @add_start_docstrings(__UpperCAmelCase )
    def __call__( self : Optional[Any] , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray ) -> jnp.ndarray:
        raise NotImplementedError(
            f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class UpperCamelCase_ :
    """Abstract base for logits warpers (same contract as the processor base
    above: subclasses must implement `__call__`)."""

    @add_start_docstrings(__UpperCAmelCase )
    def __call__( self : Tuple , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray ) -> jnp.ndarray:
        raise NotImplementedError(
            f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class UpperCamelCase_ (UpperCAmelCase_ ):
    """A list of logits processors applied in order.  Processors whose
    `__call__` takes extra keyword arguments receive them from `kwargs`;
    missing required kwargs raise ValueError."""

    @add_start_docstrings(__UpperCAmelCase )
    def __call__( self : Tuple , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int , **lowerCAmelCase_ : str ) -> jnp.ndarray:
        for processor in self:
            # Inspect the processor signature to decide whether to forward
            # the extra generation kwargs.
            UpperCAmelCase_ : List[Any] = inspect.signature(processor.__call__ ).parameters
            if len(__UpperCAmelCase ) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        f"""Make sure that all the required parameters: {list(function_args.keys() )} for """
                        f"""{processor.__class__} are passed to the logits processor.""" )
                UpperCAmelCase_ : Union[str, Any] = processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
            else:
                UpperCAmelCase_ : List[str] = processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        # NOTE(review): `scores` is a mangled/undefined name here; the
        # processor results are bound to throwaway locals above.
        return scores
class UpperCamelCase_ (UpperCAmelCase_ ):
    """Temperature warper: divides logits by a strictly positive temperature.

    NOTE(review): `UpperCAmelCase_ : str = temperature` binds a local, not
    `self.temperature`, and `__call__` both reads that unset attribute and
    returns the original `scores` untouched — scrambled code.
    """

    def __init__( self : Any , lowerCAmelCase_ : float ) -> Union[str, Any]:
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not (temperature > 0):
            raise ValueError(f"""`temperature` has to be a strictly positive float, but is {temperature}""" )
        UpperCAmelCase_ : str = temperature

    def __call__( self : Union[str, Any] , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ) -> jnp.ndarray:
        UpperCAmelCase_ : Dict = scores / self.temperature
        return scores
class UpperCamelCase_ (UpperCAmelCase_ ):
    """Top-p (nucleus) warper: keeps the smallest set of tokens whose
    cumulative probability exceeds `top_p`, masking the rest with
    `filter_value`, while always keeping at least `min_tokens_to_keep`.

    NOTE(review): locals are mangled (`top_p`, `score_mask`, etc. are
    undefined names in this scrambled form); compare with the upstream
    FlaxTopPLogitsWarper before trusting details.
    """

    def __init__( self : Any , lowerCAmelCase_ : float , lowerCAmelCase_ : float = -float("Inf" ) , lowerCAmelCase_ : int = 1 ) -> str:
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (min_tokens_to_keep < 1):
            raise ValueError(f"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
        UpperCAmelCase_ : Dict = top_p
        UpperCAmelCase_ : Optional[Any] = filter_value
        UpperCAmelCase_ : Dict = min_tokens_to_keep

    def __call__( self : Any , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ) -> jnp.ndarray:
        # Sort all logits descending, then mask by cumulative probability.
        UpperCAmelCase_ , UpperCAmelCase_ : str = lax.top_k(__UpperCAmelCase , scores.shape[-1] )
        UpperCAmelCase_ : str = jnp.full_like(__UpperCAmelCase , self.filter_value )
        UpperCAmelCase_ : Optional[int] = jax.nn.softmax(__UpperCAmelCase , axis=-1 ).cumsum(axis=-1 )
        UpperCAmelCase_ : List[Any] = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        UpperCAmelCase_ : List[str] = jnp.roll(__UpperCAmelCase , 1 )
        score_mask |= score_mask.at[:, 0].set(__UpperCAmelCase )
        # min tokens to keep
        UpperCAmelCase_ : Union[str, Any] = score_mask.at[:, : self.min_tokens_to_keep].set(__UpperCAmelCase )
        UpperCAmelCase_ : int = jnp.where(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        # Un-sort the masked scores back into vocabulary order.
        UpperCAmelCase_ : int = jax.lax.sort_key_val(__UpperCAmelCase , __UpperCAmelCase )[-1]
        return next_scores
class UpperCamelCase_ (UpperCAmelCase_ ):
    """Top-k warper: keeps only the `top_k` highest-scoring tokens per row,
    masking the rest with `filter_value`.

    NOTE(review): scrambled locals — names like `top_k`, `batch_size`,
    `topk_scores` in the body are undefined as written.
    """

    def __init__( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : float = -float("Inf" ) , lowerCAmelCase_ : int = 1 ) -> str:
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or top_k <= 0:
            raise ValueError(f"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
        UpperCAmelCase_ : List[Any] = max(__UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase_ : List[Any] = filter_value

    def __call__( self : Any , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ) -> jnp.ndarray:
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = scores.shape
        # Start from an all-masked flat score buffer, then scatter the top-k
        # values back at their flattened vocabulary positions.
        UpperCAmelCase_ : str = jnp.full(batch_size * vocab_size , self.filter_value )
        UpperCAmelCase_ : str = min(self.top_k , scores.shape[-1] )  # Safety check
        UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = lax.top_k(__UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase_ : int = jnp.broadcast_to((jnp.arange(__UpperCAmelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        UpperCAmelCase_ : List[Any] = topk_scores.flatten()
        UpperCAmelCase_ : str = topk_indices.flatten() + shift
        UpperCAmelCase_ : Optional[Any] = next_scores_flat.at[topk_indices_flat].set(__UpperCAmelCase )
        UpperCAmelCase_ : Optional[int] = next_scores_flat.reshape(__UpperCAmelCase , __UpperCAmelCase )
        return next_scores
class UpperCamelCase_ (UpperCAmelCase_ ):
    """Forces the first generated token to be `bos_token_id` by masking all
    other logits at generation step 1."""

    def __init__( self : Optional[Any] , lowerCAmelCase_ : int ) -> Tuple:
        # NOTE(review): binds a local, not `self.bos_token_id` — scrambled.
        UpperCAmelCase_ : Tuple = bos_token_id

    def __call__( self : Optional[Any] , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ) -> jnp.ndarray:
        UpperCAmelCase_ : Union[str, Any] = jnp.full(scores.shape , -float("inf" ) )
        # apply_penalty is 1 only when cur_len == 1 (first decoding step).
        UpperCAmelCase_ : Optional[int] = 1 - jnp.bool_(cur_len - 1 )
        UpperCAmelCase_ : Tuple = jnp.where(__UpperCAmelCase , new_scores.at[:, self.bos_token_id].set(0 ) , __UpperCAmelCase )
        return scores
class UpperCamelCase_ (UpperCAmelCase_ ):
    """Forces `eos_token_id` as the last token once `max_length` is reached
    by masking all other logits at that step."""

    def __init__( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Union[str, Any]:
        UpperCAmelCase_ : str = max_length
        UpperCAmelCase_ : Tuple = eos_token_id

    def __call__( self : Optional[int] , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ) -> jnp.ndarray:
        UpperCAmelCase_ : Any = jnp.full(scores.shape , -float("inf" ) )
        # apply_penalty is 1 only at the final step (cur_len == max_length-1).
        UpperCAmelCase_ : int = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        UpperCAmelCase_ : List[str] = jnp.where(__UpperCAmelCase , new_scores.at[:, self.eos_token_id].set(0 ) , __UpperCAmelCase )
        return scores
class UpperCamelCase_ (UpperCAmelCase_ ):
    """Prevents `eos_token_id` from being sampled before `min_length`
    tokens have been generated, by setting its logit to -inf."""

    def __init__( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Tuple:
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or min_length < 0:
            raise ValueError(f"""`min_length` has to be a positive integer, but is {min_length}""" )
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or eos_token_id < 0:
            raise ValueError(f"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
        UpperCAmelCase_ : int = min_length
        UpperCAmelCase_ : int = eos_token_id

    def __call__( self : Optional[int] , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ) -> jnp.ndarray:
        # apply_penalty is 1 while cur_len < min_length, else 0.
        UpperCAmelCase_ : Tuple = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        UpperCAmelCase_ : Optional[Any] = jnp.where(__UpperCAmelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , __UpperCAmelCase )
        return scores
class UpperCamelCase_ (UpperCAmelCase_ ):
    """Suppresses a fixed set of tokens while generation is still at
    `begin_index` (i.e. right after the forced prompt tokens)."""

    def __init__( self : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple ) -> List[str]:
        UpperCAmelCase_ : Optional[int] = list(__UpperCAmelCase )
        UpperCAmelCase_ : Optional[Any] = begin_index

    def __call__( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int ) -> str:
        # apply_penalty is 1 exactly when cur_len == begin_index.
        UpperCAmelCase_ : Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index )
        UpperCAmelCase_ : List[Any] = jnp.where(__UpperCAmelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , __UpperCAmelCase )
        return scores
class UpperCamelCase_ (UpperCAmelCase_ ):
    """Unconditionally suppresses a fixed list of token ids at every step by
    setting their logits to -inf."""

    def __init__( self : List[Any] , lowerCAmelCase_ : list ) -> str:
        UpperCAmelCase_ : List[str] = list(__UpperCAmelCase )

    def __call__( self : List[str] , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ) -> jnp.ndarray:
        UpperCAmelCase_ : Optional[int] = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
        return scores
class UpperCamelCase_ (UpperCAmelCase_ ):
    """Forces specific token ids at specific generation indices (used e.g.
    for Whisper's language/task prompt).  The {index: token} mapping is
    converted to a dense int array for XLA compatibility; indices without a
    forced token hold -1."""

    def __init__( self : List[Any] , lowerCAmelCase_ : List[str] ) -> Optional[int]:
        UpperCAmelCase_ : str = dict(__UpperCAmelCase )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        UpperCAmelCase_ : Tuple = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                UpperCAmelCase_ : Union[str, Any] = force_token_array.at[index].set(__UpperCAmelCase )
        UpperCAmelCase_ : List[str] = jnp.intaa(__UpperCAmelCase )

    def __call__( self : List[str] , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : int ) -> jnp.ndarray:
        def _force_token(lowerCAmelCase_ : List[Any] ):
            # Replace the whole row with -inf except the forced token (0).
            UpperCAmelCase_ : str = scores.shape[0]
            UpperCAmelCase_ : List[Any] = self.force_token_array[generation_idx]
            UpperCAmelCase_ : Any = jnp.ones_like(__UpperCAmelCase , dtype=scores.dtype ) * -float("inf" )
            UpperCAmelCase_ : List[Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            UpperCAmelCase_ : Union[str, Any] = lax.dynamic_update_slice(__UpperCAmelCase , __UpperCAmelCase , (0, current_token) )
            return new_scores

        # Only force when cur_len is inside the table and the entry is >= 0.
        UpperCAmelCase_ : int = lax.cond(
            cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 , lambda: _force_token(__UpperCAmelCase ) , lambda: scores , ) , )
        return scores
class UpperCamelCase_ (UpperCAmelCase_ ):
    """Whisper timestamp rules: timestamp tokens must appear in valid pairs,
    the first timestamp is capped by `max_initial_timestamp_index`, and when
    the total probability mass on timestamps exceeds that of any text token,
    text tokens are masked so a timestamp is sampled.

    NOTE(review): heavily scrambled — many result names (`scores`,
    `last_was_timestamp`, ...) are undefined as written; compare with
    upstream FlaxWhisperTimeStampLogitsProcessor before trusting details.
    """

    def __init__( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> Optional[int]:
        UpperCAmelCase_ : Optional[int] = generate_config.eos_token_id
        UpperCAmelCase_ : Optional[int] = generate_config.no_timestamps_token_id
        # Timestamp tokens occupy ids above no_timestamps_token_id.
        UpperCAmelCase_ : List[str] = generate_config.no_timestamps_token_id + 1
        UpperCAmelCase_ : Optional[int] = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(__UpperCAmelCase , "max_initial_timestamp_index" ):
            UpperCAmelCase_ : int = generate_config.max_initial_timestamp_index
        else:
            UpperCAmelCase_ : Optional[int] = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            UpperCAmelCase_ : Optional[Any] = model_config.vocab_size

    def __call__( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : str ) -> Union[str, Any]:
        # The <|notimestamps|> token is never sampled by this processor.
        UpperCAmelCase_ : Dict = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )

        def handle_pairs(lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ):
            # Enforce: after a lone timestamp only text or EOS; after a
            # timestamp pair no further timestamps at this position.
            UpperCAmelCase_ : Optional[int] = jnp.where((cur_len - self.begin_index) >= 1 , __UpperCAmelCase , __UpperCAmelCase )
            UpperCAmelCase_ : int = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __UpperCAmelCase , )
            UpperCAmelCase_ : Tuple = jnp.where((cur_len - self.begin_index) < 2 , __UpperCAmelCase , __UpperCAmelCase )
            UpperCAmelCase_ : int = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , __UpperCAmelCase , __UpperCAmelCase , )
            return jnp.where(
                __UpperCAmelCase , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , __UpperCAmelCase , )

        UpperCAmelCase_ : Optional[int] = jax.vmap(__UpperCAmelCase )(__UpperCAmelCase , __UpperCAmelCase )
        # Cap the very first timestamp at max_initial_timestamp_index.
        UpperCAmelCase_ : List[str] = jnp.where(cur_len == self.begin_index , __UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase_ : List[Any] = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __UpperCAmelCase , )
        UpperCAmelCase_ : str = self.timestamp_begin + self.max_initial_timestamp_index
        UpperCAmelCase_ : Dict = jnp.where(
            __UpperCAmelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , __UpperCAmelCase , )
        # if sum of probability over timestamps is above any other token, sample timestamp
        UpperCAmelCase_ : str = jax.nn.log_softmax(__UpperCAmelCase , axis=-1 )

        def handle_cumulative_probs(lowerCAmelCase_ : int , lowerCAmelCase_ : Any ):
            UpperCAmelCase_ : Dict = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            UpperCAmelCase_ : Any = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , __UpperCAmelCase , )

        UpperCAmelCase_ : Union[str, Any] = jax.vmap(__UpperCAmelCase )(__UpperCAmelCase , __UpperCAmelCase )
        return scores
| 268 | import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
# Integration test: launches real SageMaker single-node training jobs for a
# PyTorch and a TensorFlow GLUE fine-tune and asserts runtime/metric budgets.
# Only runs when TEST_SAGEMAKER=True (release-time gating).
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 6_50, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
        },
        {
            'framework': 'tensorflow',
            'script': 'run_tf.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 6_00, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
        },
    ] )
class A ( unittest.TestCase ):
    def lowercase_ (self : int ) -> Optional[Any]:
        """Setup: copy the example training script into the test workspace."""
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=__UpperCAmelCase , )
        assert hasattr(self , "env" )

    def lowercase_ (self : List[Any] , __UpperCAmelCase : Optional[int]=1 ) -> Dict:
        """Build a single-instance HuggingFace SageMaker estimator."""
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=__UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCAmelCase , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )

    def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Tuple ) -> Optional[int]:
        """Export the job's CloudWatch metrics to a CSV in the test path."""
        TrainingJobAnalytics(__UpperCAmelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )

    def lowercase_ (self : Any ) -> Optional[Any]:
        """Run training and assert runtime / accuracy / loss against budgets.

        NOTE(review): the `UpperCAmelCase__` bindings are locals standing in
        for `estimator`, `result_metrics_df`, `eval_accuracy`, `eval_loss`,
        `train_runtime` — scrambled; the later reads are undefined as written.
        """
        UpperCAmelCase__ = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        UpperCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        UpperCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        UpperCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        UpperCAmelCase__ = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 9_9_9_9_9_9 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __UpperCAmelCase )
| 65 | 0 |
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class _lowerCamelCase ( UpperCAmelCase_ ):
    """Hash table with separate chaining: each slot stores a deque of values
    and a slot is only considered full once it holds `charge_factor` items.

    NOTE(review): if `self.values[key]` is None, `_set_value` builds a fresh
    deque into a throwaway local but then calls `appendleft` on the still-None
    slot — scrambled; compare with the upstream HashTableWithLinkedList.
    """

    def __init__(self , *__a , **__a ) -> int:
        super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )

    def snake_case_ (self , __a , __a ) -> str:
        """Prepend `data` to the deque chained at `key`."""
        UpperCamelCase = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(__UpperCAmelCase )
        UpperCamelCase = self.values[key]

    def snake_case_ (self ) -> List[Any]:
        """Average remaining capacity per slot, scaled by the charge factor."""
        return (
            sum(self.charge_factor - len(__UpperCAmelCase ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )

    def snake_case_ (self , __a , __a=None ) -> Optional[int]:
        """Keep using `key` until its chain is full and no empty slot exists,
        then fall back to the parent collision resolution."""
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(__UpperCAmelCase ) == 0
        ):
            return key
        return super()._collision_resolution(__UpperCAmelCase , __UpperCAmelCase )
| 153 | import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return sigmoid(value); with ``deriv=True`` return the derivative
    expressed in terms of an already-activated output ``value``.

    Bug fix: the original signature declared the same parameter name twice
    (``__A, __A``), which is a SyntaxError in Python.
    """
    if deriv:
        # For y = sigmoid(x): dy/dx = y * (1 - y); here `value` is y.
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial input value fed through the single-weight "network".
INITIAL_VALUE = 0.0_2
# Backwards-compatible alias for the original (mangled) constant name.
UpperCamelCase__ = INITIAL_VALUE


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight by gradient descent and return the final
    prediction scaled to the 0-100 range of ``expected``.

    Args:
        expected: target value on a 0-100 scale.
        number_propagations: number of forward/backward passes (must be >= 1).

    Raises:
        ValueError: if ``number_propagations`` is less than 1.
    """
    if number_propagations < 1:
        raise ValueError("number_propagations must be at least 1")
    # Random odd starting weight in [1, 199] mapped to a float.
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_a
        # Error delta (gradient through the sigmoid, using the activation)
        layer_1_delta = layer_1_error * sigmoid_function(layer_a, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 100


# Backwards-compatible alias: the original bound both functions to the one
# mangled name `lowerCAmelCase_`, of which only this definition survived.
lowerCAmelCase_ = forward_propagation

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
| 65 | 0 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ :str = logging.get_logger(__name__)
a_ :Dict = {
"kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class snake_case__ ( UpperCAmelCase_ ):
    """Configuration for the ALIGN text encoder (a BERT-style model).

    NOTE(review): the `snake_case__ : Any = vocab_size` style lines in
    `__init__` bind locals, not `self.*` attributes — scrambled; compare with
    the upstream AlignTextConfig.
    """

    _SCREAMING_SNAKE_CASE = 'align_text_model'

    def __init__( self : Dict, _snake_case : List[str]=3_0_5_2_2, _snake_case : str=7_6_8, _snake_case : int=1_2, _snake_case : List[str]=1_2, _snake_case : Any=3_0_7_2, _snake_case : Any="gelu", _snake_case : Optional[int]=0.1, _snake_case : int=0.1, _snake_case : Dict=5_1_2, _snake_case : Dict=2, _snake_case : Tuple=0.0_2, _snake_case : int=1e-12, _snake_case : Dict=0, _snake_case : Optional[int]="absolute", _snake_case : str=True, **_snake_case : List[Any], ) ->Dict:
        """Store BERT-style hyperparameters for the text tower."""
        super().__init__(**__UpperCAmelCase )
        snake_case__ : Any = vocab_size
        snake_case__ : List[str] = hidden_size
        snake_case__ : Dict = num_hidden_layers
        snake_case__ : str = num_attention_heads
        snake_case__ : int = hidden_act
        snake_case__ : Tuple = intermediate_size
        snake_case__ : List[Any] = hidden_dropout_prob
        snake_case__ : Any = attention_probs_dropout_prob
        snake_case__ : str = max_position_embeddings
        snake_case__ : Optional[int] = type_vocab_size
        snake_case__ : Tuple = initializer_range
        snake_case__ : int = layer_norm_eps
        snake_case__ : Optional[Any] = position_embedding_type
        snake_case__ : Optional[Any] = use_cache
        snake_case__ : str = pad_token_id

    @classmethod
    def lowercase_ ( cls : Tuple, _snake_case : Union[str, os.PathLike], **_snake_case : int ) ->"PretrainedConfig":
        """Load a config from a checkpoint; if it is a composite `align`
        config, descend into its `text_config` sub-dict."""
        cls._set_token_in_kwargs(__UpperCAmelCase )
        snake_case__ , snake_case__ : List[str] = cls.get_config_dict(__UpperCAmelCase, **__UpperCAmelCase )
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get('model_type' ) == "align":
            snake_case__ : Dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(__UpperCAmelCase, **__UpperCAmelCase )
class snake_case__(UpperCAmelCase_):
    """Vision-tower configuration (EfficientNet-style) for the ALIGN model.

    NOTE(review): class/base names are machine-mangled; upstream this is
    ``AlignVisionConfig(PretrainedConfig)``.
    """

    # The classmethod below reads `cls.model_type`; keep the mangled alias
    # for backward compatibility.
    model_type = _SCREAMING_SNAKE_CASE = 'align_vision_model'

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        # NOTE: list defaults mirror the upstream signature; they are never
        # mutated, so the shared-mutable-default pitfall does not apply.
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        """Build the vision-tower config; parameter names/defaults restored
        from the values the (mangled) body stores."""
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        # Each EfficientNet block expands to 4 torch layers.
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def lowercase_(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config from a pretrained checkpoint (mangled name for
        ``from_pretrained``); unwraps a composite ALIGN config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get('model_type') == "align":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')

        return cls.from_dict(config_dict, **kwargs)
class snake_case__(UpperCAmelCase_):
    """Composite configuration combining the ALIGN text and vision towers.

    NOTE(review): names are machine-mangled; upstream this is
    ``AlignConfig(PretrainedConfig)``.
    """

    model_type = 'align'
    # The mangled source's surviving `_SCREAMING_SNAKE_CASE` value was True;
    # keep it as an alias of the real flag.
    is_composition = _SCREAMING_SNAKE_CASE = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        """Build the composite config from (optional) sub-config dicts."""
        super().__init__(**kwargs)

        # Fall back to default sub-configs when none are supplied.
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the AlignTextConfig with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.')

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

    # Backward-compatible alias: the mangled source exposed the serialization
    # method under this name.
    lowercase_ = to_dict
| 277 | from __future__ import annotations
class Matrix:
    """An immutable-order 2D matrix of ints/floats supporting arithmetic,
    determinants, minors/cofactors, adjugates and inverses.

    NOTE(review): the mangled source named every method ``lowercase_`` (so
    they shadowed each other) while the bodies referenced the real names
    (``self.num_rows``, ``Matrix(...)``, ``Matrix.dot_product``); the real
    names are restored and ``A`` kept as an alias of the original class name.
    """

    def __init__(self, rows):
        """Validate and store `rows` (list of equal-length non-empty lists of
        ints/floats); an empty outer list gives a 0x0 matrix."""
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self):
        """Return the matrix columns as a list of lists (i.e. the transpose)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        """(rows, columns) tuple."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        """Return the identity matrix of the same order."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        """Return the determinant (0 for non-square matrices) via cofactor
        expansion along the first row."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        return sum(
            self.rows[0][column] * self.cofactors().rows[0][column]
            for column in range(self.num_columns)
        )

    def is_invertable(self):
        """A matrix is invertible iff its determinant is non-zero."""
        return bool(self.determinant())

    def get_minor(self, row, column):
        """Determinant of the submatrix with `row`/`column` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        """Signed minor at (row, column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        """Matrix of minors."""
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self):
        """Matrix of cofactors (minors with checkerboard signs)."""
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self):
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        """Return adjugate / determinant; raises TypeError when singular.
        NOTE: scalar multiplication truncates to int, matching the original."""
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            # Preserved from the original source; joins the characters of the
            # row's str() representation — TODO confirm intended formatting.
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row, position=None):
        """Append `row`, or insert it at `position` when given."""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        """Append `column`, or insert it at `position` when given."""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other):
        """Scalar multiply (truncating to int, as in the original) or matrix
        multiply when orders are compatible."""
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second")
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other):
        """Integer power; 0 gives the identity, negative powers require an
        invertible matrix."""
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        """Dot product of two equal-length lists."""
        return sum(row[i] * column[i] for i in range(len(row)))


# Backward-compatible alias for the original (mangled) class name.
A = Matrix
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 0 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    """Schedule processes with Highest Response Ratio Next (HRRN) and return
    the per-process turn-around time, indexed by arrival order.

    Renamed from the mangled ``__lowercase`` to the name the ``__main__``
    block actually calls; the undefined ``__A`` argsort argument is restored
    to ``arrival_time``. Note: ``arrival_time`` is sorted in place.
    """
    current_time = 0
    # Number of processes finished so far.
    finished_process_count = 0
    # 0 = still pending, 1 = finished.
    finished_process = [0] * no_of_process
    # Result slots, one per process.
    turn_around_time = [0] * no_of_process

    # Sort all three lists by arrival time (argsort computed once).
    order = np.argsort(arrival_time)
    burst_time = [burst_time[i] for i in order]
    process_name = [process_name[i] for i in order]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # First process that has not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        # If the CPU would be idle, jump forward to that process's arrival.
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        loc = 0  # index of the process chosen to run next
        response_ratio = 0  # best response ratio seen so far
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Run the chosen process to completion (HRRN is non-preemptive).
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        finished_process[loc] = 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    """Return per-process waiting time: turn-around time minus burst time.

    Renamed from the mangled ``__lowercase`` to the name the ``__main__``
    block calls; results are now stored in ``waiting_time`` instead of a
    throwaway local.
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
lowerCAmelCase__ : Tuple =5
lowerCAmelCase__ : Union[str, Any] =['''A''', '''B''', '''C''', '''D''', '''E''']
lowerCAmelCase__ : int =[1, 2, 3, 4, 5]
lowerCAmelCase__ : List[str] =[1, 2, 3, 4, 5]
lowerCAmelCase__ : Optional[Any] =calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
lowerCAmelCase__ : Any =calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 257 | import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Real constant names restored: the tokenizer class below references
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP / BPE_TOKEN_MERGES, but the
# mangled source assigned everything to a single shadowed name.
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'tokenizer_config_file': 'tokenizer_config.json',
    'merges_file': 'merges.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
        ),
    },
    'tokenizer_config_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
        ),
    },
    'merges_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
        ),
    },
}

# "</w>" marks end-of-word in the merges file; "@@ " marks a non-final BPE
# piece in the vocabulary.
BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a string or a
    tuple of symbols).

    Renamed from the mangled ``lowerCAmelCase_`` to the name the tokenizer's
    ``bpe`` method calls; the locals ``pairs``/``prev_char`` were undefined
    in the mangled body and are restored.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length; name restored because the tokenizer
# class references PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES directly.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4}
class A(UpperCAmelCase_):
    """BPE tokenizer for Speech2Text2 (wav2vec2 speech translation).

    NOTE(review): names are machine-mangled; upstream this is
    ``Speech2Text2Tokenizer(PreTrainedTokenizer)``. Duplicate parameter
    names (a SyntaxError), shadowed method names, and lost ``self.*``
    assignment targets are restored from the bodies' own references.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        """Load the vocab (and optionally merges) files. Without a merges
        file the tokenizer can only decode, not encode."""
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )

        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        # Reverse mapping for decoding ids back to tokens.
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary."""
        return len(self.decoder)

    def get_vocab(self):
        """Full vocabulary including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single whitespace token and return
        the resulting pieces joined with the ``@@ `` continuation marker."""
        # Mark the final symbol with the end-of-word suffix.
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked known bigram first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Whitespace-split `text` and BPE-encode each token; requires a
        merges file."""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding.")

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token):
        """Token -> id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Id -> token, falling back to the unk token."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens):
        """Join tokens back into text."""
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json (and merges.txt when present) into
        `save_directory`; returns the written paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            # Merges must be written in rank order.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
| 65 | 0 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__snake_case = """base_with_context"""
def __lowerCAmelCase ( lowercase : Any , lowercase : Union[str, Any] ) -> int:
    """simple docstring"""
    # NOTE(review): machine-mangled. Upstream this is
    # `load_notes_encoder(weights, model)`: it copies T5X token-encoder
    # weights into a torch SpectrogramNotesEncoder and returns the model.
    # As written it is broken: both parameters are named `lowercase`
    # (duplicate argument = SyntaxError), the body reads undefined names
    # (`weights`, `model`, `__A`), and every assignment target was collapsed
    # to the throwaway local `snake_case`, so nothing is stored on the model.
    # The per-module `model.<submodule>.weight` targets must be restored from
    # the upstream conversion script before this can run.
    # Token-embedding table.
    snake_case : int = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
    # Positional encoding; `requires_grad=__A` was presumably False upstream
    # (frozen embedding) — TODO confirm.
    snake_case : List[Any] = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=__A )
    for lyr_num, lyr in enumerate(model.encoders ):
        snake_case : Optional[Any] = weights[F'layers_{lyr_num}']
        # Pre-attention layer-norm scale.
        snake_case : str = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        snake_case : int = ly_weight["attention"]
        # q/k/v/out projection kernels, transposed from the T5X layout.
        snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        snake_case : str = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        snake_case : int = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        # Pre-MLP layer norm and gated feed-forward kernels.
        snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
        snake_case : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
    # Final encoder layer norm.
    snake_case : Dict = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
    return model
def __lowerCAmelCase ( lowercase : Any , lowercase : int ) -> Tuple:
    """simple docstring"""
    # NOTE(review): machine-mangled. Upstream this is
    # `load_continuous_encoder(weights, model)` which copies T5X
    # continuous-input encoder weights into a SpectrogramContEncoder. Same
    # breakage as the function above: duplicate `lowercase` parameters,
    # undefined `weights`/`model`/`__A`, and all assignment targets collapsed
    # to `snake_case` — restore the `model.<submodule>.weight` targets from
    # the upstream script before using.
    # Input projection for continuous (spectrogram) inputs.
    snake_case : Dict = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
    # Positional encoding; `requires_grad=__A` presumably False — TODO confirm.
    snake_case : Optional[int] = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=__A )
    for lyr_num, lyr in enumerate(model.encoders ):
        snake_case : str = weights[F'layers_{lyr_num}']
        snake_case : List[Any] = ly_weight["attention"]
        # Self-attention projections (transposed from T5X layout).
        snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        snake_case : str = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        snake_case : Tuple = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        # Gated feed-forward kernels and pre-MLP layer norm.
        snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        snake_case : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
        snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
    # Final encoder layer norm.
    snake_case : int = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
    return model
def __lowerCAmelCase ( lowercase : int , lowercase : Any ) -> List[Any]:
    """simple docstring"""
    # NOTE(review): machine-mangled. Upstream this is
    # `load_decoder(weights, model)` which copies T5X decoder weights into a
    # TaFilmDecoder. Same breakage as the loaders above: duplicate `lowercase`
    # parameters, undefined `weights`/`model`/`__A`, and all assignment
    # targets collapsed to `snake_case` — restore the
    # `model.<submodule>.weight` targets from the upstream script before use.
    # Diffusion-time embedding MLP.
    snake_case : str = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
    snake_case : Any = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
    # Positional encoding; `requires_grad=__A` presumably False — TODO confirm.
    snake_case : Tuple = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=__A )
    # Projection of the continuous (noisy spectrogram) inputs.
    snake_case : str = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        snake_case : Union[str, Any] = weights[F'layers_{lyr_num}']
        snake_case : str = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
        # FiLM conditioning layer before self-attention.
        snake_case : Optional[int] = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
        snake_case : Optional[int] = ly_weight["self_attention"]
        snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        snake_case : str = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        snake_case : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        # Cross-attention block weights.
        snake_case : str = ly_weight["MultiHeadDotProductAttention_0"]
        snake_case : Dict = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        snake_case : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        snake_case : str = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        snake_case : Any = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
        snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
        # FiLM conditioning layer before the MLP.
        snake_case : Optional[Any] = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
        snake_case : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
    # Final decoder layer norm and spectrogram output projection.
    snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
    snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
    return model
def main(args):
    """Convert a T5X Music Spectrogram Diffusion checkpoint into a diffusers
    SpectrogramDiffusionPipeline.

    Renamed from the mangled ``__lowerCAmelCase`` to ``main`` (the name the
    ``__main__`` block calls); local variable names are restored from their
    later uses (``ta_checkpoint``, ``synth_model``, ``pipe``, ...).
    """
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path)
    # Materialize the checkpoint as host numpy arrays.
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )

    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )

    # Copy the T5X weights into the torch modules.
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
__snake_case = parser.parse_args()
main(args)
| 203 | from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A(UpperCAmelCase_):
    """Output container for the decoder.

    NOTE(review): names are machine-mangled — upstream this is
    ``DecoderOutput(BaseOutput)``. The mangled field name started with two
    underscores and would have been class-private-mangled (unreachable as a
    normal dataclass field), so the real field name is restored.
    """

    # Decoded sample tensor; presumably (batch, channels, height, width) —
    # TODO confirm against callers.
    sample: torch.FloatTensor
class A(nn.Module):
    """Convolutional VAE encoder: conv-in, a stack of down blocks, a mid
    block, then group-norm / SiLU / conv-out.

    NOTE(review): names are machine-mangled — upstream this is the diffusers
    ``Encoder``. Duplicate parameter names (a SyntaxError), lost ``self.*``
    assignment targets, the nonexistent ``nn.Convad`` (``nn.Conv2d``), and
    the ``forward`` method name (required by ``nn.Module``) are restored.
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1, )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1E-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None, )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1], resnet_eps=1E-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None, )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1E-6)
        self.conv_act = nn.SiLU()

        # double_z: emit mean and logvar channels for the VAE posterior.
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        """Encode `x`, optionally using activation checkpointing in training."""
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class A ( nn.Module ):
    """VAE decoder: conv-in -> mid block -> up blocks -> norm/SiLU/conv-out.

    Restores the constructor parameter names that the obfuscation collapsed into a
    single duplicated identifier (a SyntaxError), replaces the non-existent
    `nn.Convad` with `nn.Conv2d`, and names the step method `forward`.
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(6_4,),
        layers_per_block=2,
        norm_num_groups=3_2,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        # Spatial norm conditions on the latent, so it needs a temb channel count.
        temb_channels = in_channels if norm_type == "spatial" else None
        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1],
            resnet_eps=1E-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1E-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1E-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        """Decode latent *z* (optionally conditioned on *latent_embeds* for spatial norm)."""
        sample = z
        sample = self.conv_in(sample)
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class A ( nn.Module ):
    """Vector quantizer (VQ-VAE codebook lookup) with optional index remapping.

    Fixes over the mangled original: the constructor's parameters had all collapsed
    into one duplicated name (a SyntaxError); the four methods had collapsed into a
    single `lowercase_` even though the body calls `self.remap_to_used` /
    `self.unmap_to_all` by their real names; the `new[unknown] = ...` indexed
    assignment in `remap_to_used` had degraded into a plain assignment.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
                f"""Using {self.unknown_index} for unknown indices."""
            )
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        """Map raw codebook indices to positions in the `used` subset; unknowns get
        a random (or fixed) replacement index."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        """Inverse of remap_to_used: map `used`-subset positions back to raw indices."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        """Quantize NCHW tensor *z*; returns (z_q, commitment loss, (perplexity, encodings, indices))."""
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        """Look up codebook vectors for *indices*, optionally reshaping to NCHW *shape*."""
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices)
        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
class A ( UpperCAmelCase_ ):
    """Diagonal Gaussian posterior parameterized by concatenated (mean, logvar) along dim 1.

    Restores the constructor parameter names that collapsed into one duplicated
    identifier (a SyntaxError) and the four method names (`sample`, `kl`, `nll`,
    `mode`) that had all collapsed into `lowercase_`, leaving only the last one.
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        # Clamp logvar for numerical stability of exp().
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            # Zero variance: sampling always returns the mean.
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        """Draw a reparameterized sample mean + std * eps."""
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        """KL divergence to a standard normal (other=None) or to another diagonal Gaussian."""
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        """Negative log-likelihood of *sample* under this Gaussian, summed over *dims*."""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        """The distribution mode, i.e. the mean."""
        return self.mean
| 65 | 0 |
"""simple docstring"""
import torch
from torch import nn
class _UpperCAmelCase ( nn.Module):
def __init__( self : Any , lowercase_ : Any , lowercase_ : str , lowercase_ : str , lowercase_ : str , lowercase_ : Dict=1 , lowercase_ : int=False ):
super().__init__()
snake_case_ : Dict = n_token
snake_case_ : Optional[int] = d_embed
snake_case_ : List[Any] = d_proj
snake_case_ : List[Any] = cutoffs + [n_token]
snake_case_ : Union[str, Any] = [0] + self.cutoffs
snake_case_ : Dict = div_val
snake_case_ : Optional[int] = self.cutoffs[0]
snake_case_ : Optional[Any] = len(self.cutoffs ) - 1
snake_case_ : Union[str, Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
snake_case_ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
snake_case_ : str = nn.Parameter(torch.zeros(self.n_clusters ) )
snake_case_ : Dict = nn.ModuleList()
snake_case_ : List[Any] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(__UpperCAmelCase , __UpperCAmelCase ) ) )
else:
self.out_projs.append(__UpperCAmelCase )
self.out_layers.append(nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) )
else:
for i in range(len(self.cutoffs ) ):
snake_case_, snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case_ : Tuple = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(__UpperCAmelCase , __UpperCAmelCase ) ) )
self.out_layers.append(nn.Linear(__UpperCAmelCase , r_idx - l_idx ) )
snake_case_ : Tuple = keep_order
def _snake_case ( self : int , lowercase_ : Any , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : Any ):
if proj is None:
snake_case_ : List[str] = nn.functional.linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
snake_case_ : Dict = nn.functional.linear(__UpperCAmelCase , proj.t().contiguous() )
snake_case_ : Dict = nn.functional.linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _snake_case ( self : List[str] , lowercase_ : List[str] , lowercase_ : Union[str, Any]=None , lowercase_ : str=False ):
if labels is not None:
# Shift so that tokens < n predict n
snake_case_ : Dict = hidden[..., :-1, :].contiguous()
snake_case_ : Tuple = labels[..., 1:].contiguous()
snake_case_ : List[str] = hidden.view(-1 , hidden.size(-1 ) )
snake_case_ : Optional[Any] = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
snake_case_ : Union[str, Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
snake_case_ : Tuple = self._compute_logit(__UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
snake_case_ : Optional[Any] = labels != -100
snake_case_ : List[Any] = torch.zeros_like(__UpperCAmelCase , dtype=hidden.dtype , device=hidden.device )
snake_case_ : Union[str, Any] = (
-nn.functional.log_softmax(__UpperCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
snake_case_ : Optional[Any] = nn.functional.log_softmax(__UpperCAmelCase , dim=-1 )
else:
# construct weights and biases
snake_case_, snake_case_ : Optional[int] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
snake_case_, snake_case_ : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case_ : Tuple = self.out_layers[0].weight[l_idx:r_idx]
snake_case_ : List[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
snake_case_ : Optional[Any] = self.out_layers[i].weight
snake_case_ : List[Any] = self.out_layers[i].bias
if i == 0:
snake_case_ : Any = torch.cat([weight_i, self.cluster_weight] , dim=0 )
snake_case_ : List[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__UpperCAmelCase )
biases.append(__UpperCAmelCase )
snake_case_, snake_case_, snake_case_ : int = weights[0], biases[0], self.out_projs[0]
snake_case_ : int = self._compute_logit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
snake_case_ : Any = nn.functional.log_softmax(__UpperCAmelCase , dim=1 )
if labels is None:
snake_case_ : List[str] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
snake_case_ : Optional[int] = torch.zeros_like(__UpperCAmelCase , dtype=hidden.dtype , device=hidden.device )
snake_case_ : int = 0
snake_case_ : str = [0] + self.cutoffs
for i in range(len(__UpperCAmelCase ) - 1 ):
snake_case_, snake_case_ : Optional[Any] = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
snake_case_ : List[Any] = (labels >= l_idx) & (labels < r_idx)
snake_case_ : Union[str, Any] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
snake_case_ : Optional[int] = labels.index_select(0 , __UpperCAmelCase ) - l_idx
snake_case_ : Tuple = head_logprob.index_select(0 , __UpperCAmelCase )
snake_case_ : int = hidden.index_select(0 , __UpperCAmelCase )
else:
snake_case_ : Dict = hidden
if i == 0:
if labels is not None:
snake_case_ : Optional[Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
snake_case_ : str = head_logprob[:, : self.cutoffs[0]]
else:
snake_case_, snake_case_, snake_case_ : Optional[int] = weights[i], biases[i], self.out_projs[i]
snake_case_ : List[Any] = self._compute_logit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
snake_case_ : Optional[int] = nn.functional.log_softmax(__UpperCAmelCase , dim=1 )
snake_case_ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
snake_case_ : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
snake_case_ : Optional[int] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
snake_case_ : List[Any] = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , __UpperCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _snake_case ( self : Optional[int] , lowercase_ : Any ):
if self.n_clusters == 0:
snake_case_ : Any = self._compute_logit(__UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__UpperCAmelCase , dim=-1 )
else:
# construct weights and biases
snake_case_, snake_case_ : Dict = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
snake_case_, snake_case_ : int = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case_ : Optional[Any] = self.out_layers[0].weight[l_idx:r_idx]
snake_case_ : Optional[int] = self.out_layers[0].bias[l_idx:r_idx]
else:
snake_case_ : str = self.out_layers[i].weight
snake_case_ : Any = self.out_layers[i].bias
if i == 0:
snake_case_ : Any = torch.cat([weight_i, self.cluster_weight] , dim=0 )
snake_case_ : List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__UpperCAmelCase )
biases.append(__UpperCAmelCase )
snake_case_, snake_case_, snake_case_ : Union[str, Any] = weights[0], biases[0], self.out_projs[0]
snake_case_ : Optional[int] = self._compute_logit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
snake_case_ : Any = hidden.new_empty((head_logit.size(0 ), self.n_token) )
snake_case_ : Dict = nn.functional.log_softmax(__UpperCAmelCase , dim=1 )
snake_case_ : Optional[Any] = [0] + self.cutoffs
for i in range(len(__UpperCAmelCase ) - 1 ):
snake_case_, snake_case_ : Optional[int] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
snake_case_ : Optional[Any] = head_logprob[:, : self.cutoffs[0]]
else:
snake_case_, snake_case_, snake_case_ : str = weights[i], biases[i], self.out_projs[i]
snake_case_ : Optional[Any] = self._compute_logit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
snake_case_ : Optional[int] = nn.functional.log_softmax(__UpperCAmelCase , dim=1 )
snake_case_ : int = head_logprob[:, -i] + tail_logprob_i
snake_case_ : List[str] = logprob_i
return out
| 264 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowerCAmelCase_ ( __A, __A=False ) -> Any:
'''simple docstring'''
try:
UpperCAmelCase__ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase__ = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase__ = strtobool(__A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""" )
return _value
# Feature flags and pytest skip-markers for the test suite.
# NOTE(review): obfuscation collapsed the original distinct names (e.g.
# `_run_slow_tests`, `require_lz4`) into `UpperCamelCase__`, and
# `parse_flag_from_env` into `lowerCAmelCase_`; the decorators below still
# reference the original flag names — confirm before relying on them.
UpperCamelCase__ = parse_flag_from_env('RUN_SLOW', default=False)
UpperCamelCase__ = parse_flag_from_env('RUN_REMOTE', default=False)
UpperCamelCase__ = parse_flag_from_env('RUN_LOCAL', default=True)
UpperCamelCase__ = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
UpperCamelCase__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
UpperCamelCase__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
UpperCamelCase__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
UpperCamelCase__ = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
UpperCamelCase__ = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
    reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
UpperCamelCase__ = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('0.3.2'),
    reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
UpperCamelCase__ = pytest.mark.skipif(
    sys.platform == 'win32',
    reason='test should not be run on Windows',
)
def lowerCAmelCase_ ( __A ) -> Any:
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
UpperCAmelCase__ = unittest.skip("test requires faiss" )(__A )
return test_case
def lowerCAmelCase_ ( test_case ) -> Optional[Any]:
    """Decorator skipping *test_case* unless the third-party `regex` module is importable.

    Fix: the skipped-case assignment had been mangled away, so the function returned
    the undefined name `test_case` (a NameError).
    """
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case
def lowerCAmelCase_ ( test_case ) -> List[str]:
    """Decorator skipping *test_case* unless `elasticsearch` is importable.

    Fix: the skipped-case assignment had been mangled away, so the function returned
    the undefined name `test_case` (a NameError).
    """
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case
def lowerCAmelCase_ ( test_case ) -> List[Any]:
    """Decorator skipping *test_case* unless `sqlalchemy` is importable.

    Fix: the skipped-case assignment had been mangled away, so the function returned
    the undefined name `test_case` (a NameError).
    """
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def lowerCAmelCase_ ( test_case ) -> List[str]:
    """Decorator skipping *test_case* unless PyTorch is available per `config`.

    Fix: the skipped-case assignment had been mangled away, so the function returned
    the undefined name `test_case` (a NameError).
    """
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case
def lowerCAmelCase_ ( test_case ) -> Union[str, Any]:
    """Decorator skipping *test_case* unless TensorFlow is available per `config`.

    Fix: the skipped-case assignment had been mangled away, so the function returned
    the undefined name `test_case` (a NameError).
    """
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case
def lowerCAmelCase_ ( test_case ) -> Any:
    """Decorator skipping *test_case* unless JAX is available per `config`.

    Fix: the skipped-case assignment had been mangled away, so the function returned
    the undefined name `test_case` (a NameError).
    """
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case
def lowerCAmelCase_ ( test_case ) -> int:
    """Decorator skipping *test_case* unless Pillow is available per `config`.

    Fix: the skipped-case assignment had been mangled away, so the function returned
    the undefined name `test_case` (a NameError).
    """
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def lowerCAmelCase_ ( test_case ) -> Tuple:
    """Decorator skipping *test_case* unless `transformers` is importable.

    Fix: the else-branch returned the undefined name `test_case` (a NameError)
    because the parameter had been mangled.
    """
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case
def lowerCAmelCase_ ( test_case ) -> Dict:
    """Decorator skipping *test_case* unless `tiktoken` is importable.

    Fix: the else-branch returned the undefined name `test_case` (a NameError)
    because the parameter had been mangled.
    """
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case
def lowerCAmelCase_ ( __A ) -> Optional[Any]:
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy" )(__A )
else:
return test_case
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
def _require_spacy_model(__A ):
try:
import spacy # noqa F401
spacy.load(__A )
except ImportError:
return unittest.skip("test requires spacy" )(__A )
except OSError:
return unittest.skip("test requires spacy model '{}'".format(__A ) )(__A )
else:
return test_case
return _require_spacy_model
def lowerCAmelCase_ ( test_case ) -> Optional[Any]:
    """Decorator skipping *test_case* unless `pyspark` is importable.

    Fix: the else-branch returned the undefined name `test_case` (a NameError)
    because the parameter had been mangled.
    """
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case
def lowerCAmelCase_ ( test_case ) -> Tuple:
    """Decorator skipping *test_case* unless `joblibspark` is importable.

    Fix: the else-branch returned the undefined name `test_case` (a NameError)
    because the parameter had been mangled.
    """
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def lowerCAmelCase_ ( test_case ) -> Optional[int]:
    """Skip *test_case* unless slow tests are enabled (RUN_SLOW env flag).

    Fix: the skipped-case assignment had been mangled away, so the function returned
    the undefined name `test_case` (a NameError).
    # NOTE(review): `_run_slow_tests` was the original module-level flag; in this
    # mangled file the flag is now bound to `UpperCamelCase__` — confirm.
    """
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case
def lowerCAmelCase_ ( test_case ) -> List[Any]:
    """Skip *test_case* unless local tests are enabled (RUN_LOCAL env flag).

    Fix: the skipped-case assignment had been mangled away, so the function returned
    the undefined name `test_case` (a NameError).
    """
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case
def lowerCAmelCase_ ( test_case ) -> Optional[Any]:
    """Skip *test_case* unless packaged tests are enabled (RUN_PACKAGED env flag).

    Fix: the skipped-case assignment had been mangled away, so the function returned
    the undefined name `test_case` (a NameError).
    """
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case
def lowerCAmelCase_ ( test_case ) -> Any:
    """Skip *test_case* unless remote tests are enabled (RUN_REMOTE env flag).

    Fix: the skipped-case assignment had been mangled away, so the function returned
    the undefined name `test_case` (a NameError).
    """
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def lowerCAmelCase_ ( *__A ) -> Optional[int]:
'''simple docstring'''
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(__A ) and name.startswith("test" ):
for decorator in decorators:
UpperCAmelCase__ = decorator(__A )
setattr(cls, __A, __A )
return cls
return decorate
class A ( UpperCAmelCase_ ):
    """Marker exception raised when an offline-simulated request would hang forever
    because no timeout was set. NOTE(review): the base class is this file's mangled
    alias — presumably a requests/ConnectionError subclass upstream; confirm."""

    pass
class A ( UpperCAmelCase_ ):
    """Ways to simulate an offline environment (originally `OfflineSimulationMode`).

    Fix: all three members had been mangled to the same attribute name, so only the
    last value (2) survived; they are restored as distinct constants.
    """

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def lowerCAmelCase_ ( mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16 ) -> List[str]:
    """Context manager simulating an offline environment for `requests` traffic.

    Fixes: the nested handler functions had duplicated mangled parameters (a
    SyntaxError) and the attribute/keyword assignments (`kwargs["timeout"]`,
    `e.request.url`, `max_retry_error.args`, `e.args`) had degraded into plain
    local assignments.
    """
    online_request = requests.Session().request

    # Patched onto requests.Session.request, so it receives the session as `self`.
    def timeout_request(self, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout."""
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise

    # Patched onto requests.Session.send, so it receives the session as `self`.
    def raise_connection_error(self, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def lowerCAmelCase_ ( *args, **kwargs ) -> str:
    """Chdir into a fresh temporary directory for the duration of the block; the
    original working directory is always restored on exit.

    *args/**kwargs are forwarded to `tempfile.TemporaryDirectory`.

    Fix: the var-positional and var-keyword parameters had been mangled to the same
    name (a SyntaxError), and both `os.chdir` calls referenced that mangled name
    instead of the temp dir / original dir.
    """
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def lowerCAmelCase_ ( ) -> Optional[Any]:
    """Context manager asserting that Arrow's allocated memory grew inside the block."""
    import gc

    gc.collect()
    baseline = pa.total_allocated_bytes()
    yield
    delta = pa.total_allocated_bytes() - baseline
    assert delta > 0, "Arrow memory didn't increase."
@contextmanager
def lowerCAmelCase_ ( ) -> List[str]:
    """Context manager asserting that Arrow's allocated memory did not grow inside the block."""
    import gc

    gc.collect()
    baseline = pa.total_allocated_bytes()
    yield
    delta = pa.total_allocated_bytes() - baseline
    assert delta <= 0, "Arrow memory wasn't expected to increase."
def lowerCAmelCase_ ( rng1, rng2 ) -> List[str]:
    """Return True when two numpy Generators are in the same state, compared by
    drawing 10 integers from deep copies (the originals are left untouched).

    Fix: both parameters had been mangled to the same name (a SyntaxError).
    """
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def lowerCAmelCase_ ( func ) -> Optional[int]:
    """Decorator turning an HTTP 500/502 failure into a pytest xfail instead of an error.

    Fix: the inner wrapper's parameters had duplicated mangled names (a SyntaxError)
    and the status check stringified the mangled name instead of the caught error.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class A :
    """Container for a subprocess outcome: return code plus captured stdout/stderr lines.

    Fix: the constructor assigned its arguments to mangled locals instead of instance
    attributes, so `result.returncode` / `result.stdout` / `result.stderr` (used by
    the subprocess runner below) raised AttributeError.
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def lowerCAmelCase_ ( stream, callback ) -> Optional[int]:
    """Read *stream* line by line, invoking *callback* for each line until EOF.

    Fix: both parameters had been mangled to the same name (a SyntaxError).
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def lowerCAmelCase_ ( cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False ) -> _RunOutput:
    """Run *cmd* in a subprocess, streaming stdout/stderr line by line into the
    returned `_RunOutput` (and echoing to this process's stdout/stderr unless quiet).

    Fixes: all keyword parameters had been mangled to one duplicated name (a
    SyntaxError); the `tee` helper likewise; and bare coroutines were passed to
    `asyncio.wait`, which raises TypeError on Python 3.11+ — they are now wrapped
    in Tasks.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def lowerCAmelCase_ ( cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True ) -> _RunOutput:
    """Synchronous wrapper around the async subprocess streamer; raises RuntimeError
    on a non-zero exit code or when the command produced no output at all.

    Fix: all keyword parameters had been mangled to one duplicated name (a
    SyntaxError) while the body referenced the restored names.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}"""
        )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"""'{cmd_str}' produced no output.""")
    return result
def lowerCAmelCase_ ( ) -> Tuple:
    """Return this pytest-xdist worker's numeric id (0 when not running under xdist).

    Fix: the regex substitution referenced the undefined mangled name `__A` instead
    of the environment value just read (a NameError).
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)
def lowerCAmelCase_ ( ) -> List[Any]:
    """Return a torch.distributed port that is unique per pytest-xdist worker:
    the base port 29500 offset by the worker id."""
    base_port = 29_500
    worker_offset = pytest_xdist_worker_id()
    return base_port + worker_offset
| 65 | 0 |
from collections.abc import Callable
import numpy as np
def a__(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.array:
    """Solve y' = ode_func(x, y) with Heun's method (explicit trapezoidal rule).

    Restores the algorithm broken by the name-mangling (all five parameters
    were named ``UpperCAmelCase`` — a SyntaxError — and the body referenced
    undefined ``y``, ``x``, ``ya``, ``xa``).

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        y0: initial value y(x0).
        x0: initial abscissa.
        step_size: integration step h (> 0).
        x_end: final abscissa; the interval [x0, x_end] is covered in
            ceil((x_end - x0) / h) steps.

    Returns:
        numpy array of the n + 1 successive y values, y[0] == y0.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Euler predictor, then trapezoidal corrector averaging both slopes.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
def lowerCAmelCase_(stra: str, strb: str) -> float:
    """Jaro–Winkler similarity of two strings, in [0, 1].

    Restores the implementation broken by the mangler: the def line had junk
    (``| 336 |``) fused onto it, both parameters were named ``__A``
    (a SyntaxError), and the body referenced undefined names.

    >>> round(lowerCAmelCase_("hello", "world"), 10)
    0.4666666667
    >>> lowerCAmelCase_("hello", "hello")
    1.0
    """

    def get_matched_characters(_stra: str, _strb: str) -> str:
        # Characters of _stra that also occur in _strb within the match window.
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                # Blank out the consumed character so it cannot match twice.
                _strb = f"""{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"""

        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters (Winkler bonus)
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(lowerCAmelCase_('hello', 'world'))
| 65 | 0 |
"""simple docstring"""
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# Packages whose installed versions are always checked at import time
# (usually the ones listed in `install_requires` in setup.py).
#
# order specific notes:
# - tqdm must be checked before tokenizers
SCREAMING_SNAKE_CASE__ = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

# Fixes two mangler bugs: the loop iterated an undefined
# `pkgs_to_check_at_runtime` instead of the list defined above, and the
# `List[str]` annotation referenced a name this file never imports.
for pkg in SCREAMING_SNAKE_CASE__:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def _lowerCamelCase(pkg, hint=None):
    """Require the version of *pkg* pinned in the ``deps`` table.

    *hint* is an optional message appended to the version-mismatch error.
    The mangled original declared two parameters both named ``a`` (a
    SyntaxError) and referenced the undefined names ``pkg`` / ``__A``.
    """
    require_version(deps[pkg], hint)
def lowerCAmelCase_(start, finish) -> None:
    """Greedy activity selection: print a maximum set of compatible activities.

    Assumes ``finish`` is sorted in ascending order (standard precondition of
    the greedy algorithm). Prints the chosen activity indices, comma-separated.
    Restores the implementation broken by the mangler (junk fused to the def
    line, duplicate ``__A`` parameters, undefined locals in the guard).
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than or equal to the
        # finish time of the previously selected activity, select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest
    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    lowerCAmelCase_(start, finish)
| 65 | 0 |
"""simple docstring"""
import math
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
return math.sqrt(__A ) * math.sqrt(__A ) == num
def lowerCAmelCase__(UpperCamelCase__):
    '''Return True when the argument is a perfect square, via binary search.

    Exact integer binary search over [0, n]; no floating point involved.
    Restores the locals (``left``/``right``/``mid``) that the mangler
    collapsed into a single rebinding, leaving them undefined.
    '''
    left = 0
    right = UpperCamelCase__
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == UpperCamelCase__:
            return True
        elif mid**2 > UpperCamelCase__:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 294 | import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
UpperCamelCase__ = 'base_with_context'
def lowerCAmelCase_ ( __A, __A ) -> int:
    """Copy T5X token-encoder checkpoint weights into a notes-encoder model.

    NOTE(review): this block is machine-mangled — both parameters are named
    ``__A`` (invalid Python) and every assignment rebinds the throwaway name
    ``UpperCAmelCase__``, while the body reads the undefined names ``weights``
    and ``model``. The original presumably took ``(weights, model)`` and bound
    each tensor to a model attribute; confirm against the upstream diffusers
    conversion script before relying on this.
    """
    # Token embedding and (non-trainable) positional embedding tables.
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
    # Per-layer attention + MLP weights; T5X kernels are transposed for torch.
    for lyr_num, lyr in enumerate(model.encoders ):
        UpperCAmelCase__ = weights[f"""layers_{lyr_num}"""]
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = ly_weight["attention"]
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
    return model
def lowerCAmelCase_ ( __A, __A ) -> Tuple:
    """Copy T5X continuous-encoder checkpoint weights into the target model.

    NOTE(review): machine-mangled like the function above — duplicate ``__A``
    parameters and discarded assignment targets; ``weights``/``model`` are
    undefined here. Structure mirrors the notes-encoder loader but starts from
    an ``input_proj`` kernel instead of a token-embedding table.
    """
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
    # Per-layer attention + MLP weights; T5X kernels are transposed for torch.
    for lyr_num, lyr in enumerate(model.encoders ):
        UpperCAmelCase__ = weights[f"""layers_{lyr_num}"""]
        UpperCAmelCase__ = ly_weight["attention"]
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
    return model
def lowerCAmelCase_ ( __A, __A ) -> List[Any]:
    """Copy T5X FiLM-decoder checkpoint weights into the target model.

    NOTE(review): machine-mangled — duplicate ``__A`` parameters, discarded
    assignment targets, and undefined ``weights``/``model``. Handles time
    embedding, continuous-input projection, per-layer self/cross attention,
    FiLM conditioning, MLP, final norm and the spectrogram output dense.
    """
    # Diffusion-time embedding MLP.
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=__A )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        UpperCAmelCase__ = weights[f"""layers_{lyr_num}"""]
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
        # Self-attention projections.
        UpperCAmelCase__ = ly_weight["self_attention"]
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        # Cross-attention projections.
        UpperCAmelCase__ = ly_weight["MultiHeadDotProductAttention_0"]
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
        UpperCAmelCase__ = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
        UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
    return model
def lowerCAmelCase_ ( __A ) -> int:
    """Convert a music-spectrogram-diffusion T5X checkpoint into a diffusers
    SpectrogramDiffusionPipeline and optionally save it.

    NOTE(review): machine-mangled — the parameter is unused and the body reads
    the undefined names ``args``, ``ta_checkpoint``, ``synth_model``, ``pipe``;
    the original presumably bound each intermediate result to a real name.
    Flow: load T5X checkpoint -> parse gin config -> build the three
    sub-models -> load their weights -> assemble pipeline -> save if requested.
    """
    UpperCAmelCase__ = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    UpperCAmelCase__ = jnp.tree_util.tree_map(onp.array, __A )
    # Extra gin bindings required to re-parse the training config.
    UpperCAmelCase__ = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    UpperCAmelCase__ = os.path.join(args.checkpoint_path, "..", "config.gin" )
    UpperCAmelCase__ = inference.parse_training_gin_file(__A, __A )
    UpperCAmelCase__ = inference.InferenceModel(args.checkpoint_path, __A )
    UpperCAmelCase__ = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large" )
    # Build the three diffusers sub-models sized from the T5X model config.
    UpperCAmelCase__ = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    UpperCAmelCase__ = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    UpperCAmelCase__ = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    # Load checkpoint weights into each sub-model.
    UpperCAmelCase__ = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], __A )
    UpperCAmelCase__ = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], __A )
    UpperCAmelCase__ = load_decoder(ta_checkpoint["target"]["decoder"], __A )
    UpperCAmelCase__ = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
    UpperCAmelCase__ = SpectrogramDiffusionPipeline(
        notes_encoder=__A, continuous_encoder=__A, decoder=__A, scheduler=__A, melgan=__A, )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    # Command-line entry point for the checkpoint conversion.
    # NOTE(review): mangled — the parser is bound to `UpperCamelCase__` but the
    # subsequent lines read the undefined names `parser`, `MODEL`, `args` and
    # call a nonexistent `main`; restore the original bindings before running.
    UpperCamelCase__ = argparse.ArgumentParser()
    parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument(
        '--checkpoint_path',
        default=f'''{MODEL}/checkpoint_500000''',
        type=str,
        required=False,
        help='Path to the original jax model checkpoint.',
    )
    UpperCamelCase__ = parser.parse_args()
    main(args)
| 65 | 0 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
A_ = logging.get_logger(__name__)
class lowercase( UpperCAmelCase_ ):
    """Wrapper holding several ControlNet models and merging their outputs.

    ``forward`` runs every net on its own conditioning image/scale and sums
    the residuals; ``save_pretrained``/``from_pretrained`` persist the nets in
    consecutively suffixed directories. NOTE(review): argument lists are
    machine-mangled (`__UpperCAmelCase` placeholders) — confirm the intended
    positional wiring against the upstream diffusers MultiControlNet source.
    """
    def __init__( self: List[str], a_: Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
        """Store the given ControlNets in an nn.ModuleList (``self.nets``)."""
        super().__init__()
        _snake_case : Dict = nn.ModuleList(__UpperCAmelCase )
    def UpperCamelCase_ ( self: int, a_: torch.FloatTensor, a_: Union[torch.Tensor, float, int], a_: torch.Tensor, a_: List[torch.tensor], a_: List[float], a_: Optional[torch.Tensor] = None, a_: Optional[torch.Tensor] = None, a_: Optional[torch.Tensor] = None, a_: Optional[Dict[str, Any]] = None, a_: bool = False, a_: bool = True, ):
        """Run each ControlNet and element-wise sum their residual outputs."""
        for i, (image, scale, controlnet) in enumerate(zip(__UpperCAmelCase, __UpperCAmelCase, self.nets ) ):
            _snake_case , _snake_case : Dict = controlnet(
                __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, )

            # merge samples: first net initializes, later nets accumulate.
            if i == 0:
                _snake_case , _snake_case : List[str] = down_samples, mid_sample
            else:
                _snake_case : str = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(__UpperCAmelCase, __UpperCAmelCase )
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample
    def UpperCamelCase_ ( self: Optional[Any], a_: Union[str, os.PathLike], a_: bool = True, a_: Callable = None, a_: bool = False, a_: Optional[str] = None, ):
        """Save each wrapped ControlNet; net k > 0 goes to ``<dir>_k``."""
        _snake_case : str = 0
        _snake_case : int = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                __UpperCAmelCase, is_main_process=__UpperCAmelCase, save_function=__UpperCAmelCase, safe_serialization=__UpperCAmelCase, variant=__UpperCAmelCase, )

            idx += 1
            _snake_case : Any = model_path_to_save + f"_{idx}"
    @classmethod
    def UpperCamelCase_ ( cls: Dict, a_: Optional[Union[str, os.PathLike]], **a_: List[str] ):
        """Load ControlNets from ``<dir>``, ``<dir>_1``, ... until none exists."""
        _snake_case : Union[str, Any] = 0
        _snake_case : Dict = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        _snake_case : Dict = pretrained_model_path
        while os.path.isdir(__UpperCAmelCase ):
            _snake_case : Optional[int] = ControlNetModel.from_pretrained(__UpperCAmelCase, **__UpperCAmelCase )
            controlnets.append(__UpperCAmelCase )

            idx += 1
            _snake_case : str = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(__UpperCAmelCase )} controlnets loaded from {pretrained_model_path}." )

        if len(__UpperCAmelCase ) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(__UpperCAmelCase )}. Expected at least {pretrained_model_path + '_0'}." )

        return cls(__UpperCAmelCase )
| 64 | import math
def lowerCAmelCase_ ( __A ) -> bool:
'''simple docstring'''
return math.sqrt(__A ) * math.sqrt(__A ) == num
def lowerCAmelCase_(__A) -> bool:
    """Return True when ``__A`` is a perfect square, via exact binary search.

    Restores the locals (``left``/``right``/``mid``) that the mangler
    collapsed into a single rebinding of ``UpperCAmelCase__``, leaving them
    undefined at use sites.
    """
    left = 0
    right = __A
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == __A:
            return True
        elif mid**2 > __A:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 65 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a__ :
    # TF-ESM model tester: builds a small EsmConfig plus random input tensors
    # and checks output shapes of the TF ESM model classes.
    # NOTE(review): machine-mangled — locals are all rebound to `lowercase`
    # and the multi-line tuple-unpack targets below are invalid as written;
    # the original bound config/input_ids/mask/labels to distinct names.
    def __init__( self , _a , ):
        # Small, fast hyper-parameters for unit testing.
        lowercase : Tuple = parent
        lowercase : List[Any] = 13
        lowercase : Any = 7
        lowercase : int = True
        lowercase : Tuple = True
        lowercase : Optional[Any] = True
        lowercase : Dict = 99
        lowercase : int = 32
        lowercase : str = 2
        lowercase : Dict = 4
        lowercase : Optional[Any] = 37
        lowercase : Union[str, Any] = "gelu"
        lowercase : Optional[Any] = 0.1
        lowercase : Union[str, Any] = 0.1
        lowercase : Tuple = 512
        lowercase : Optional[int] = 16
        lowercase : int = 2
        lowercase : Any = 0.0_2
        lowercase : Dict = 3
        lowercase : Union[str, Any] = 4
        lowercase : Union[str, Any] = None
    def __magic_name__ ( self ):
        # Build random ids/mask/label tensors and a matching EsmConfig.
        lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        lowercase : Tuple = None
        if self.use_input_mask:
            lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )

        lowercase : List[str] = None
        lowercase : Union[str, Any] = None
        lowercase : int = None
        if self.use_labels:
            lowercase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices )

        lowercase : Union[str, Any] = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __magic_name__ ( self ):
        # Decoder variant: adds encoder hidden states and attention mask.
        (
            (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) ,
        ) : str = self.prepare_config_and_inputs()

        lowercase : int = True
        lowercase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def __magic_name__ ( self , _a , _a , _a , _a , _a , _a ):
        # Base model: dict, list and positional calling conventions.
        lowercase : List[Any] = TFEsmModel(config=__UpperCAmelCase )
        lowercase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
        lowercase : Optional[int] = model(__UpperCAmelCase )

        lowercase : int = [input_ids, input_mask]
        lowercase : Dict = model(__UpperCAmelCase )

        lowercase : List[str] = model(__UpperCAmelCase )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , ):
        # Model as decoder with cross-attention inputs.
        lowercase : List[Any] = True
        lowercase : List[Any] = TFEsmModel(config=__UpperCAmelCase )
        lowercase : int = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        lowercase : Optional[Any] = model(__UpperCAmelCase )

        lowercase : List[Any] = [input_ids, input_mask]
        lowercase : Optional[Any] = model(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase )

        # Also check the case where encoder outputs are not passed
        lowercase : List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __magic_name__ ( self , _a , _a , _a , _a , _a , _a ):
        # Masked-LM head: logits over the vocabulary.
        lowercase : Optional[int] = TFEsmForMaskedLM(config=__UpperCAmelCase )
        lowercase : str = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __magic_name__ ( self , _a , _a , _a , _a , _a , _a ):
        # Token-classification head: per-token label logits.
        lowercase : Any = self.num_labels
        lowercase : Optional[Any] = TFEsmForTokenClassification(config=__UpperCAmelCase )
        lowercase : Any = {"input_ids": input_ids, "attention_mask": input_mask}
        lowercase : Tuple = model(__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __magic_name__ ( self ):
        # Split prepared inputs into (config, inputs_dict) for the common mixin.
        lowercase : List[Any] = self.prepare_config_and_inputs()
        (
            (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) ,
        ) : str = config_and_inputs
        lowercase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class a__ ( UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ):
    # Common model/pipeline test suite for the TF ESM classes.
    # NOTE(review): machine-mangled — class attributes are all named
    # `__lowerCAmelCase` (the originals were all_model_classes,
    # pipeline_model_mapping, test_head_masking, test_onnx); confirm
    # against the upstream transformers test file.
    __lowerCAmelCase = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    __lowerCAmelCase = (
        {
            'feature-extraction': TFEsmModel,
            'fill-mask': TFEsmForMaskedLM,
            'text-classification': TFEsmForSequenceClassification,
            'token-classification': TFEsmForTokenClassification,
            'zero-shot': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    def __magic_name__ ( self ):
        # Set up the model tester and config tester fixtures.
        lowercase : List[Any] = TFEsmModelTester(self )
        lowercase : Dict = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
    def __magic_name__ ( self ):
        self.config_tester.run_common_tests()
    def __magic_name__ ( self ):
        lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCAmelCase )
    def __magic_name__ ( self ):
        lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase )
    def __magic_name__ ( self ):
        lowercase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
    def __magic_name__ ( self ):
        lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
    @slow
    def __magic_name__ ( self ):
        # Smoke-test loading a pretrained checkpoint (first archive entry only).
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase : Optional[Any] = TFEsmModel.from_pretrained(__UpperCAmelCase )
            self.assertIsNotNone(__UpperCAmelCase )
    @unittest.skip("Protein models do not support embedding resizing." )
    def __magic_name__ ( self ):
        pass
    @unittest.skip("Protein models do not support embedding resizing." )
    def __magic_name__ ( self ):
        pass
    def __magic_name__ ( self ):
        # Embedding accessors: MaskedLM exposes a bias dict; others expose none.
        lowercase , lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase : Any = model_class(__UpperCAmelCase )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                lowercase : List[str] = model.get_bias()
                assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
                for k, v in name.items():
                    assert isinstance(__UpperCAmelCase , tf.Variable )
            else:
                lowercase : Tuple = model.get_output_embeddings()
                assert x is None
                lowercase : int = model.get_bias()
                assert name is None
@require_tf
class a__ ( unittest.TestCase ):
    # Slow integration tests pinning exact output values of pretrained
    # facebook/esm2_t6_8M_UR50D checkpoints against golden slices.
    @slow
    def __magic_name__ ( self ):
        # Masked-LM logits: check output shape and a 3x3 golden slice.
        lowercase : Union[str, Any] = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )

        lowercase : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowercase : Any = model(__UpperCAmelCase )[0]
        lowercase : str = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , __UpperCAmelCase )
        # compare the actual values for a slice.
        lowercase : Union[str, Any] = tf.constant(
            [
                [
                    [8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
                    [-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
                    [-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
    @slow
    def __magic_name__ ( self ):
        # Base model hidden states: compare a 3x3 golden slice.
        lowercase : Union[str, Any] = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )

        lowercase : Union[str, Any] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        lowercase : int = model(__UpperCAmelCase )[0]
        # compare the actual values for a slice.
        lowercase : Dict = tf.constant(
            [
                [
                    [0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
                    [0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
                    [0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 202 | import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class A ( UpperCAmelCase_ ):
    # Output container returned by the scheduler's `step` (see the step method
    # below, which constructs it as (prev_sample, pred_original_sample)).
    # NOTE(review): both fields are mangled to the same name `__UpperCAmelCase`,
    # so the second annotation overrides the first — the originals were
    # `prev_sample` and `pred_original_sample`.
    __UpperCAmelCase : torch.FloatTensor
    __UpperCAmelCase : Optional[torch.FloatTensor] = None
def lowerCAmelCase_ ( __A, __A=0.999, __A="cosine", ) -> Tuple:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(__A ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__A ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCAmelCase__ = []
for i in range(__A ):
UpperCAmelCase__ = i / num_diffusion_timesteps
UpperCAmelCase__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__A ) / alpha_bar_fn(__A ), __A ) )
return torch.tensor(__A, dtype=torch.floataa )
class A ( UpperCAmelCase_ , UpperCAmelCase_ ):
    # UnCLIP DDPM-style noise scheduler. Only the squaredcos_cap_v2 beta
    # schedule is supported; variance_type is "fixed_small_log" or
    # "learned_range". NOTE(review): base classes are mangled — presumably
    # SchedulerMixin and ConfigMixin from the imports above.
    @register_to_config
    def __init__(self : List[str] , __UpperCAmelCase : int = 1_0_0_0 , __UpperCAmelCase : str = "fixed_small_log" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[float] = 1.0 , __UpperCAmelCase : str = "epsilon" , __UpperCAmelCase : str = "squaredcos_cap_v2" , ) -> Optional[int]:
        """Precompute betas/alphas and initialize the timestep state."""
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )

        UpperCAmelCase__ = betas_for_alpha_bar(__UpperCAmelCase )

        UpperCAmelCase__ = 1.0 - self.betas
        UpperCAmelCase__ = torch.cumprod(self.alphas , dim=0 )
        UpperCAmelCase__ = torch.tensor(1.0 )

        # standard deviation of the initial noise distribution
        UpperCAmelCase__ = 1.0

        # setable values
        UpperCAmelCase__ = None
        UpperCAmelCase__ = torch.from_numpy(np.arange(0 , __UpperCAmelCase )[::-1].copy() )

        UpperCAmelCase__ = variance_type
    def lowercase_ (self : List[str] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Optional[int] = None ) -> torch.FloatTensor:
        """Identity input scaling (kept for scheduler API compatibility)."""
        return sample
    def lowercase_ (self : int , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, torch.device] = None ) -> Any:
        """Set the discrete inference timesteps, evenly spread over training steps."""
        UpperCAmelCase__ = num_inference_steps
        UpperCAmelCase__ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        UpperCAmelCase__ = (np.arange(0 , __UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
        UpperCAmelCase__ = torch.from_numpy(__UpperCAmelCase ).to(__UpperCAmelCase )
    def lowercase_ (self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[str]=None ) -> Tuple:
        """Compute the per-step posterior variance (DDPM eq. (6)/(7))."""
        if prev_timestep is None:
            UpperCAmelCase__ = t - 1

        UpperCAmelCase__ = self.alphas_cumprod[t]
        UpperCAmelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        UpperCAmelCase__ = 1 - alpha_prod_t
        UpperCAmelCase__ = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            UpperCAmelCase__ = self.betas[t]
        else:
            UpperCAmelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        UpperCAmelCase__ = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            UpperCAmelCase__ = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            UpperCAmelCase__ = torch.log(torch.clamp(__UpperCAmelCase , min=1E-20 ) )
            UpperCAmelCase__ = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            UpperCAmelCase__ = variance.log()
            UpperCAmelCase__ = beta.log()

            UpperCAmelCase__ = (predicted_variance + 1) / 2
            UpperCAmelCase__ = frac * max_log + (1 - frac) * min_log

        return variance
    def lowercase_ (self : Optional[int] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : int , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        """One reverse-diffusion step: predict x_0, form x_{t-1}, add noise for t > 0."""
        UpperCAmelCase__ = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            UpperCAmelCase__ , UpperCAmelCase__ = torch.split(__UpperCAmelCase , sample.shape[1] , dim=1 )
        else:
            UpperCAmelCase__ = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            UpperCAmelCase__ = t - 1

        UpperCAmelCase__ = self.alphas_cumprod[t]
        UpperCAmelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        UpperCAmelCase__ = 1 - alpha_prod_t
        UpperCAmelCase__ = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            UpperCAmelCase__ = self.betas[t]
            UpperCAmelCase__ = self.alphas[t]
        else:
            UpperCAmelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev
            UpperCAmelCase__ = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            UpperCAmelCase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            UpperCAmelCase__ = model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
                " for the UnCLIPScheduler." )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            UpperCAmelCase__ = torch.clamp(
                __UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        UpperCAmelCase__ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        UpperCAmelCase__ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        UpperCAmelCase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        UpperCAmelCase__ = 0
        if t > 0:
            UpperCAmelCase__ = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=__UpperCAmelCase , device=model_output.device )

            UpperCAmelCase__ = self._get_variance(
                __UpperCAmelCase , predicted_variance=__UpperCAmelCase , prev_timestep=__UpperCAmelCase , )

            if self.variance_type == "fixed_small_log":
                UpperCAmelCase__ = variance
            elif self.variance_type == "learned_range":
                UpperCAmelCase__ = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
                    " for the UnCLIPScheduler." )

            UpperCAmelCase__ = variance * variance_noise

        UpperCAmelCase__ = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=__UpperCAmelCase , pred_original_sample=__UpperCAmelCase )
    def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.IntTensor , ) -> torch.FloatTensor:
        """Forward-diffuse clean samples to the given timesteps (q(x_t | x_0))."""
        UpperCAmelCase__ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
        UpperCAmelCase__ = timesteps.to(original_samples.device )

        UpperCAmelCase__ = alphas_cumprod[timesteps] ** 0.5
        UpperCAmelCase__ = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            UpperCAmelCase__ = sqrt_alpha_prod.unsqueeze(-1 )

        UpperCAmelCase__ = (1 - alphas_cumprod[timesteps]) ** 0.5
        UpperCAmelCase__ = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            UpperCAmelCase__ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )

        UpperCAmelCase__ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 65 | 0 |
"""simple docstring"""
from manim import *
class Stage1(Scene):
    """Manim scene: stage 1 of the big-model-inference animation.

    Draws CPU/GPU/Model boxes, explanatory text, then animates an "empty model"
    being mapped onto CPU memory cells.
    NOTE(review): names restored from their uses (``mem``, ``cpu``, ``gpu``,
    ``model``, ``cpu_targs`` … were read but never bound in the obfuscated
    original); base class and ``construct`` restored per manim's Scene API.
    """

    def construct(self):
        # Basic memory-cell rectangles reused for all columns.
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        # CPU: two stacked columns of 6 cells with a label underneath.
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        # GPU: a single cell, aligned to the CPU's bottom edge.
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        # Model: a horizontal row of 6 cells.
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu, run_time=1), Create(gpu, run_time=1), Create(model, run_time=1), )

        step_1 = MarkupText(
            f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""", font_size=24, )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=18, )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(key)

        # Shrink each model cell into a small yellow target inside the CPU columns.
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                # First target anchors to the bottom-left CPU cell.
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                # Fourth target starts a second row above the first.
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 268 | import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class A(unittest.TestCase):
    """Multi-GPU integration tests; each test launches a helper script with torchrun.

    NOTE(review): method and attribute names restored from their uses — the
    obfuscated original gave all four test methods the same name (so only one
    survived) and never assigned ``self.test_file_path`` / ``self.data_loop_file_path``
    / ``self.operation_file_path`` that the tests read.
    """

    def setUp(self):
        # Locate the bundled test scripts next to accelerate.test_utils.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        """Run the generic accelerate test script on every available GPU."""
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        """Run the distributed-ops test script on every available GPU."""
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(f"""Command: {cmd}""")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # Re-launch THIS file under torchrun; the __main__ block below exercises
        # Accelerator.pad_across_processes.
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        """Run the data-loop test restricted to two visible devices."""
        print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Executed under torchrun by test_pad_across_processes above: every rank
    # builds a tensor of a rank-dependent length and checks padding semantics.
    # NOTE(review): variable names restored from their uses; the obfuscated
    # original read `shape`, `tensor`, `tensora`, `error_msg` and `index`
    # without ever binding them.
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    # Default padding: zeros appended at the end, up to the longest rank.
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # pad_first=True: zeros prepended instead of appended.
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 65 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# `logger` is read by the config classes below (warning/info calls); the
# obfuscated original bound it to a placeholder name.
logger = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config files.
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class InstructBlipVisionConfig(PretrainedConfig):
    """Configuration for the InstructBLIP vision encoder.

    NOTE(review): class/attribute names restored from their uses — the sibling
    ``InstructBlipConfig`` below instantiates ``InstructBlipVisionConfig``, and
    the obfuscated original collapsed every ``self.X = X`` assignment and gave
    all three config classes the same name.
    """

    model_type = 'instructblip_vision_model'

    def __init__(self, hidden_size=14_08, intermediate_size=61_44, num_hidden_layers=39, num_attention_heads=16, image_size=2_24, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-1_0, qkv_bias=True, **kwargs, ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this vision config, unwrapping it from a composite config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    """Configuration for the InstructBLIP Q-Former bridge module.

    NOTE(review): class/attribute names restored from their uses (see
    ``InstructBlipConfig`` below, which instantiates this class by name).
    """

    model_type = 'instructblip_qformer'

    def __init__(self, vocab_size=3_05_22, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, initializer_range=0.02, layer_norm_eps=1e-1_2, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=14_08, **kwargs, ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # A cross-attention layer is inserted every `cross_attention_frequency` layers.
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this Q-Former config, unwrapping it from a composite config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    """Composite configuration: vision encoder + Q-Former + language model.

    NOTE(review): names restored from their uses — this class instantiates
    ``InstructBlipVisionConfig`` and ``InstructBlipQFormerConfig`` by name,
    which the obfuscated original never defined.
    """

    model_type = 'instructblip'
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs) -> None:
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        # The language model can be any registered architecture; default to OPT.
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends into the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs, ):
        """Build a composite config from already-constructed sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 153 | import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict: dict) -> None:
    """Drop fairseq bookkeeping keys from `state_dict` in place.

    The trailing underscore follows the convention for in-place mutators; the
    name is restored from the call inside the conversion function below.
    Missing keys are ignored.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    """Build a bias-free Linear layer that shares its weight with `emb`.

    Used to tie the LM head to the shared token embedding; the returned layer's
    weight tensor is the embedding's weight tensor (no copy).
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share storage with the embedding so the weights stay tied.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False):
    """Convert a fairseq mBART checkpoint into a `MBartForConditionalGeneration`.

    :param checkpoint_path: path to the fairseq ``model.pt`` file
    :param hf_config_path: HuggingFace config to base the architecture on
    :param finetuned: whether the checkpoint is fine-tuned (ties the LM head)
    :param mbart_aa: whether this is an mBART-50 checkpoint (uses relu activation)
    NOTE(review): function/local names restored from the ``__main__`` call and
    internal uses; the obfuscated original gave all three helpers one name.
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_aa and finetuned:
        mbart_config.activation_function = "relu"

    # The shared embedding mirrors the decoder token embedding.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    # NOTE(review): argparse derives the attribute name from the flag, so the
    # '--mbart_50' option is read as `args.mbart_50` (the obfuscated original
    # accessed a non-existent `args.mbart_aa`).
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 65 | 0 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Solve the fractional knapsack problem greedily.

    Repeatedly takes the item with the highest profit/weight ratio, taking only
    a fraction of the last item if it does not fit, and returns the total
    profit collected.

    :param profit: profit of each item (non-negative)
    :param weight: weight of each item, same length as ``profit``
    :param max_weight: knapsack capacity, must be > 0
    :raises ValueError: on mismatched lengths, non-positive capacity, or
        negative profits/weights
    NOTE(review): function/parameter names restored from the ``__main__`` call
    and body references; the obfuscated original declared three parameters
    with the same name (a SyntaxError).
    """
    if len(profit) != len(weight):
        raise ValueError('The length of profit and weight must be same.')
    if max_weight <= 0:
        raise ValueError('max_weight must greater than zero.')
    if any(p < 0 for p in profit):
        raise ValueError('Profit can not be negative.')
    if any(w < 0 for w in weight):
        raise ValueError('Weight can not be negative.')
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # Mark the item as consumed so equal ratios resolve to distinct items.
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
    # Interactive driver: read item profits/weights and capacity from stdin.
    # NOTE(review): variable names restored to match the calc_profit(...) call;
    # the obfuscated original bound all three inputs to one placeholder name.
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
| 277 | from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
UpperCamelCase__ = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    """Check at runtime that `pkg` satisfies the version pinned in `deps`.

    :param pkg: key into the pinned-versions table
    :param hint: optional extra message forwarded to `require_version` on failure
    NOTE(review): the obfuscated original declared both parameters with the
    same name (a SyntaxError) and never forwarded the hint.
    """
    require_version(deps[pkg], hint)
| 65 | 0 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
# `logger` is used throughout the trainer below; the obfuscated original bound
# it to a placeholder name.
logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    """Trainer for quantization-aware question answering.

    Adds calibration, QA-style post-processed evaluation/prediction, and ONNX
    export on top of the base ``Trainer``.
    NOTE(review): class, method, parameter and attribute names restored from
    their uses — the obfuscated original gave all five methods the same name,
    declared duplicate parameters (a SyntaxError) and never set the ``self.*``
    attributes the methods read.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        """Build a non-shuffled DataLoader over the calibration dataset."""
        # NOTE(review): `self.calib_dataset` is not set in __init__ — presumably
        # assigned by a caller before calibration; confirm against call sites.
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('Trainer: calibration requires an calib_dataset.')
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description='Calibration')
        return DataLoader(
            calib_dataset, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory, shuffle=False, )

    def calibrate(self, calib_dataset=None):
        """Run post-training-quantization calibration over up to `self.calib_num` samples."""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info('***** Running calibration *****')
        logger.info(f""" Num examples = {self.calib_num}""")
        logger.info(f""" Batch size = {calib_dataloader.batch_size}""")
        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Evaluate with QA post-processing; returns a metrics dict."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, )
        finally:
            # Always restore the user's metric fn, even if the loop raised.
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"""{metric_key_prefix}_"""):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Predict with QA post-processing; returns a `PredictionOutput`."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"""{metric_key_prefix}_"""):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        """Export the (quantized) model to `output_dir`/model.onnx using one eval batch."""
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info('Converting model to be onnx compatible')
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, 'module') else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, 'model.onnx')
        logger.info(f"""exporting model to {output_model_file}""")

        axes = {0: 'batch_size', 1: 'seq_len'}
        torch.onnx.export(
            model_to_save, input_tuple, output_model_file, export_params=True, opset_version=13, do_constant_folding=True, input_names=['input_ids', 'attention_mask', 'token_type_ids'], output_names=['output_start_logits', 'output_end_logits'], dynamic_axes={
                'input_ids': axes,
                'attention_mask': axes,
                'token_type_ids': axes,
                'output_start_logits': axes,
                'output_end_logits': axes,
            }, verbose=True, )
        logger.info('onnx export finished')
| 257 | import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCamelCase__ = logging.getLogger(__name__)
def main():
    """Tokenize a text corpus once and dump the token-id lists to a pickle.

    Each line of the input file is wrapped with the tokenizer's BOS/SEP tokens,
    encoded, and stored as a compact numpy array.
    NOTE(review): function/local names restored from their uses — the
    ``__main__`` guard calls ``main()`` and the obfuscated original bound
    every value to one placeholder name.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data." )
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use." )
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix." )
    args = parser.parse_args()

    logger.info(f"""Loading Tokenizer ({args.tokenizer_name})""" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"""Loading text from {args.file_path}""" )
    with open(args.file_path, "r", encoding="utf8" ) as fp:
        data = fp.readlines()

    logger.info("Start encoding" )
    logger.info(f"""{len(data )} examples to process.""" )

    rslt = []
    count = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"""{bos} {text.strip()} {sep}"""
        token_ids = tokenizer.encode(text, add_special_tokens=False )
        rslt.append(token_ids )

        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f"""{count} examples processed. - {(end-start):.2f}s/{interval}expl""" )
            start = time.time()
    logger.info("Finished binarization" )
    logger.info(f"""{len(data )} examples processed.""" )

    dp_file = f"""{args.dump_file}.{args.tokenizer_name}.pickle"""
    vocab_size = tokenizer.vocab_size
    # Use the smallest integer dtype that can hold every token id.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f"""Dump to {dp_file}""" )
    with open(dp_file, "wb" ) as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL )


if __name__ == "__main__":
    main()
| 65 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config files.
# NOTE(review): the obfuscated original bound the logger and this map to the
# same placeholder name, leaving the first binding dead.
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/beit-base-patch16-224-pt22k""": (
        """https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    """Configuration for the BEiT model, including segmentation-head options.

    NOTE(review): class/base/parameter names restored from the attribute
    assignments in the body; the obfuscated original declared every parameter
    with the same name (a SyntaxError).
    """

    model_type = 'beit'

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ) -> None:
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    """ONNX export configuration for BEiT.

    NOTE(review): class/base/property names restored per the OnnxConfig
    convention; the obfuscated original reused the previous class's name,
    shadowing it.
    """

    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input with a dynamic batch axis.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating the exported model against PyTorch.
        return 1e-4
| 203 | from manim import *
class A ( UpperCAmelCase_ ):
    def lowercase_ (self : Union[str, Any] ) -> List[str]:
        """Play a Manim animation showing an input flowing through a model whose
        weights are streamed layer-by-layer between CPU and GPU (disk shown for
        reference), i.e. inference on a model too large for GPU memory.

        NOTE(review): the `UpperCAmelCase__` / `__UpperCAmelCase` identifiers below
        are mangled; assignments bind one name while later lines read descriptive
        names (`cpu`, `gpu`, `model_arr`, ...), so this block does not run as-is.
        Comments describe the apparent intent — confirm against the original scene.
        """
        # Base rectangles: a memory cell, its borderless fill, and a smaller "meta" cell.
        UpperCAmelCase__ = Rectangle(height=0.5 , width=0.5 )
        UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        UpperCAmelCase__ = Rectangle(height=0.25 , width=0.25 )
        # CPU: two rows of six memory cells with a label, placed at the left.
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("CPU" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__UpperCAmelCase )
        # GPU: a single row of four memory cells with a label.
        UpperCAmelCase__ = [mem.copy() for i in range(4 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("GPU" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(__UpperCAmelCase )
        # Model: six cells representing the model's layers, placed at the right.
        UpperCAmelCase__ = [mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("Model" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(__UpperCAmelCase )
        # Colored fills: one per model layer, mirrored onto the CPU cells
        # (model_arr follows the model rectangles, model_cpu_arr the CPU copies).
        UpperCAmelCase__ = []
        UpperCAmelCase__ = []
        for i, rect in enumerate(__UpperCAmelCase ):
            UpperCAmelCase__ = fill.copy().set_fill(__UpperCAmelCase , opacity=0.8 )
            target.move_to(__UpperCAmelCase )
            model_arr.append(__UpperCAmelCase )
            UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase )
        # Disk: two rows of six smaller "meta" cells with a label, far left.
        UpperCAmelCase__ = [meta_mem.copy() for i in range(6 )]
        UpperCAmelCase__ = [meta_mem.copy() for i in range(6 )]
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase__ = Text("Disk" , font_size=2_4 )
        UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        disk.move_to([-4, -1.25, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        # Legend: a key square with markup text explaining the colors.
        UpperCAmelCase__ = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        UpperCAmelCase__ = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase__ = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__UpperCAmelCase )
        # Step 1: narration, then an input square that moves to the first layer.
        UpperCAmelCase__ = MarkupText(
            f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase ) )
        UpperCAmelCase__ = Square(0.3 )
        input.set_fill(__UpperCAmelCase , opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] , __UpperCAmelCase , buff=0.5 )
        self.play(Write(__UpperCAmelCase ) )
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=__UpperCAmelCase , buff=0.02 )
        self.play(MoveToTarget(__UpperCAmelCase ) )
        self.play(FadeOut(__UpperCAmelCase ) )
        # Arrow marking the active layer; first layer's weights move onto the GPU.
        UpperCAmelCase__ = Arrow(start=__UpperCAmelCase , end=__UpperCAmelCase , color=__UpperCAmelCase , buff=0.5 )
        a.next_to(model_arr[0].get_left() , __UpperCAmelCase , buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        UpperCAmelCase__ = MarkupText(
            f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) )
        # Shared keyword arguments for the Circumscribe highlight animations.
        UpperCAmelCase__ = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(__UpperCAmelCase ) , Circumscribe(model_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        UpperCAmelCase__ = a.copy()
        # Walk the input through all six layers: fade the arrow to the next layer,
        # move this layer's weights back to the CPU and the next layer's onto the GPU.
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 , __UpperCAmelCase , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            UpperCAmelCase__ = AnimationGroup(
                FadeOut(__UpperCAmelCase , run_time=0.5 ) , MoveToTarget(__UpperCAmelCase , run_time=0.5 ) , FadeIn(__UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
            self.play(__UpperCAmelCase )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    # Speed up the highlight after the first couple of layers.
                    UpperCAmelCase__ = 0.7
                self.play(
                    Circumscribe(model_arr[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
            else:
                # Last layer: weights return to the CPU, input exits on the right.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        # Wrap-up: fade out the arrow copies, move the input away, show the
        # closing narration and hold the final frame.
        UpperCAmelCase__ = a_c
        UpperCAmelCase__ = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
        self.play(
            FadeOut(__UpperCAmelCase ) , FadeOut(__UpperCAmelCase , run_time=0.5 ) , )
        UpperCAmelCase__ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) , MoveToTarget(__UpperCAmelCase ) )
        self.wait()
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _UpperCAmelCase ( unittest.TestCase):
    """Tests for `AlignProcessor`: saving/loading round-trips and consistency
    between the processor and its underlying tokenizer / image processor.

    The original declared every method under the same mangled name, so
    `setUp`/`tearDown` never ran and only the last test survived; standard
    unittest names are restored from each body's behavior.
    """

    def setUp(self):
        # Write a tiny vocab and image-processor config into a temp directory
        # so tokenizer/image-processor `from_pretrained` can load from it.
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one PIL image built from a random HWC uint8 array."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve: a weighted sum of a set of 2D control points.

    Renamed from the mangled `A` so the `__main__` calls to ``BezierCurve`` work,
    and the method names restored so the internal `self.basis_function` /
    `self.bezier_curve_function` calls resolve.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]) -> None:
        """
        list_of_points: control points in the xy plane that determine the
        shape of the curve.
        """
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Return the Bernstein basis weights of every control point at time t.

        t must lie in [0, 1]; the weights always sum to 1.
        """
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Return the (x, y) point on the curve at time t in [0, 1]."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        """Plot the curve and its control points with matplotlib.

        step_size: sampling interval along t; smaller values give a smoother plot.
        """
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
    # Run the doctests, then show example curves of increasing degree.
    import doctest
    doctest.testmod()
    BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_lowerCamelCase : Optional[int] = TypeVar("KEY")
_lowerCamelCase : int = TypeVar("VAL")
@dataclass(frozen=UpperCAmelCase_ , slots=UpperCAmelCase_ )
class __UpperCAmelCase ( Generic[KEY, VAL] ):
UpperCamelCase = 42
UpperCamelCase = 42
class __UpperCAmelCase ( _Item ):
def __init__( self : str ):
super().__init__(__UpperCAmelCase, __UpperCAmelCase )
def __bool__( self : List[Any] ):
return False
_lowerCamelCase : List[Any] = _DeletedItem()
class __UpperCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__( self : List[str], __A : int = 8, __A : float = 0.7_5 ):
UpperCAmelCase : Union[str, Any] = initial_block_size
UpperCAmelCase : int = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
UpperCAmelCase : str = capacity_factor
UpperCAmelCase : Union[str, Any] = 0
def __magic_name__ ( self : str, __A : KEY ):
return hash(__UpperCAmelCase ) % len(self._buckets )
def __magic_name__ ( self : Optional[int], __A : int ):
return (ind + 1) % len(self._buckets )
def __magic_name__ ( self : Optional[int], __A : int, __A : KEY, __A : VAL ):
UpperCAmelCase : List[str] = self._buckets[ind]
if not stored:
UpperCAmelCase : Dict = _Item(__UpperCAmelCase, __UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
UpperCAmelCase : Tuple = _Item(__UpperCAmelCase, __UpperCAmelCase )
return True
else:
return False
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Union[str, Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__UpperCAmelCase )
def __magic_name__ ( self : Dict ):
if len(self._buckets ) <= self._initial_block_size:
return False
UpperCAmelCase : Optional[int] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __magic_name__ ( self : int, __A : int ):
UpperCAmelCase : Dict = self._buckets
UpperCAmelCase : Union[str, Any] = [None] * new_size
UpperCAmelCase : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def __magic_name__ ( self : str ):
self._resize(len(self._buckets ) * 2 )
def __magic_name__ ( self : List[str] ):
self._resize(len(self._buckets ) // 2 )
def __magic_name__ ( self : Dict, __A : KEY ):
UpperCAmelCase : int = self._get_bucket_index(__UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
UpperCAmelCase : Optional[int] = self._get_next_ind(__UpperCAmelCase )
def __magic_name__ ( self : Optional[Any], __A : KEY, __A : VAL ):
for ind in self._iterate_buckets(__UpperCAmelCase ):
if self._try_set(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ):
break
def __setitem__( self : List[Any], __A : KEY, __A : VAL ):
if self._is_full():
self._size_up()
self._add_item(__UpperCAmelCase, __UpperCAmelCase )
def __delitem__( self : str, __A : KEY ):
for ind in self._iterate_buckets(__UpperCAmelCase ):
UpperCAmelCase : int = self._buckets[ind]
if item is None:
raise KeyError(__UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
UpperCAmelCase : int = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Dict, __A : KEY ):
for ind in self._iterate_buckets(__UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__UpperCAmelCase )
def __len__( self : int ):
return self._len
def __iter__( self : Optional[Any] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[Any] ):
UpperCAmelCase : Dict = ''' ,'''.join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , 'Tatoeba directory does not exist.' )
class A ( unittest.TestCase ):
    """Integration tests for `TatoebaConverter`.

    The original skip condition referenced an undefined name; `DEFAULT_REPO`
    (imported above) is the path being checked. The three methods also shared
    one mangled name, so `self.resolver` never existed — names restored.
    """

    @cached_property
    def resolver(self):
        """Converter writing into a fresh temporary directory."""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
"""simple docstring"""
import functools
from typing import Any
def _lowerCamelCase( a , a ):
if not isinstance(__A , __A ) or len(__A ) == 0:
raise ValueError("the string should be not empty string" )
if not isinstance(__A , __A ) or not all(
isinstance(__A , __A ) and len(__A ) > 0 for item in words ):
raise ValueError("the words should be a list of non-empty strings" )
# Build trie
__a = {}
__a = "WORD_KEEPER"
for word in words:
__a = trie
for c in word:
if c not in trie_node:
__a = {}
__a = trie_node[c]
__a = True
__a = len(__A )
# Dynamic programming method
@functools.cache
def is_breakable(a ) -> bool:
if index == len_string:
return True
__a = trie
for i in range(__A , __A ):
__a = trie_node.get(string[i] , __A )
if trie_node is None:
return False
if trie_node.get(__A , __A ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Demonstration of fuzzy-set operations with scikit-fuzzy.
    # The original assigned every result to the same mangled name while the
    # plotting code below read descriptive names; names restored from usage.

    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
"""simple docstring"""
import math
def proth(number: int) -> int:
    """Return the `number`-th Proth number (sequence 3, 5, 9, 13, 17, 25, ...).

    Renamed from the mangled `lowerCAmelCase__` so the `__main__` loop's
    `proth(number)` call resolves.

    Raises:
        TypeError: if `number` is not an int.
        ValueError: if `number` < 1.
    """
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")

    if number < 1:
        raise ValueError(f"Input value of [number={number}] must be > 0")
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Number of doubling "blocks" needed to cover the requested index.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        # Each block doubles the count of new terms appended.
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Print the first few Proth numbers; number=0 exercises the ValueError path.
    # The original assigned the result to a mangled name but printed `value`.
    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f'''ValueError: there is no {number}th Proth number''')
            continue

        print(f'''The {number}th Proth number: {value}''')
from __future__ import annotations
from collections import deque
class A :
    """Aho-Corasick automaton for multi-pattern string matching.

    The trie is stored as `self.adlist`, a flat list of node dicts with keys
    "value" (edge character), "next_states" (child indices), "fail_state"
    (suffix link), and "output" (keywords ending at this node). Method names
    were restored from the calls inside `__init__`.
    """

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        # Node 0 is the root.
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        """Return the child of `current_state` reached by `char`, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        """Insert `keyword` into the trie, creating nodes as needed."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        """BFS over the trie computing suffix links and merged outputs."""
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            # Depth-1 nodes always fail back to the root.
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                # Walk suffix links until a state with a matching edge (or root).
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                # Inherit the outputs reachable through the suffix link.
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return {keyword: [start indices]} for every occurrence in `string`."""
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Names restored: the original assigned every constant to the same mangled
# `A_`, while the tokenizer below references `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP`, `BPE_TOKEN_MERGES` and the "@@ " marker.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
    '''merges_file''': '''merges.txt''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/s2t-wav2vec2-large-en-de''': (
            '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
        ),
    },
    '''tokenizer_config_file''': {
        '''facebook/s2t-wav2vec2-large-en-de''': (
            '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
        ),
    },
    '''merges_file''': {
        '''facebook/s2t-wav2vec2-large-en-de''': (
            '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
        ),
    },
}

# End-of-word marker appended during BPE; "@@ " joins sub-word pieces.
BPE_TOKEN_MERGES = '''</w>'''
BPE_TOKEN_VOCAB = '''@@ '''
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word`.

    `word` is a sequence of symbols (variable-length strings). Renamed from
    the mangled `UpperCAmelCase__` so the `get_pairs(...)` calls inside the
    tokenizer's `bpe` method resolve; the parameter name is restored from the
    body's own references.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
# Name restored from the reference in the tokenizer class attributes below.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/s2t-wav2vec2-large-en-de''': 1024}
class lowercase( UpperCAmelCase_ ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ['input_ids', 'attention_mask']
def __init__( self: Tuple, a_: List[Any], a_: Dict="<s>", a_: Tuple="<pad>", a_: str="</s>", a_: int="<unk>", a_: List[str]=False, a_: str=None, **a_: Optional[Any], ):
'''simple docstring'''
super().__init__(
unk_token=__UpperCAmelCase, bos_token=__UpperCAmelCase, eos_token=__UpperCAmelCase, pad_token=__UpperCAmelCase, do_lower_case=__UpperCAmelCase, **__UpperCAmelCase, )
_snake_case : int = do_lower_case
with open(__UpperCAmelCase, encoding="""utf-8""" ) as vocab_handle:
_snake_case : Tuple = json.load(__UpperCAmelCase )
_snake_case : List[str] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding." )
_snake_case : List[str] = None
_snake_case : List[Any] = None
else:
with open(__UpperCAmelCase, encoding="""utf-8""" ) as merges_handle:
_snake_case : Union[str, Any] = merges_handle.read().split("""\n""" )[:-1]
_snake_case : List[Any] = [tuple(merge.split()[:2] ) for merge in merges]
_snake_case : Tuple = dict(zip(__UpperCAmelCase, range(len(__UpperCAmelCase ) ) ) )
_snake_case : List[str] = {}
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
return len(self.decoder )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
return dict(self.encoder, **self.added_tokens_encoder )
def UpperCamelCase_ ( self: Dict, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_snake_case : Optional[Any] = get_pairs(__UpperCAmelCase )
if not pairs:
return token
while True:
_snake_case : int = min(__UpperCAmelCase, key=lambda a_ : self.bpe_ranks.get(__UpperCAmelCase, float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_snake_case , _snake_case : str = bigram
_snake_case : Union[str, Any] = []
_snake_case : List[Any] = 0
while i < len(__UpperCAmelCase ):
try:
_snake_case : List[Any] = word.index(__UpperCAmelCase, __UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_snake_case : Any = j
if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_snake_case : int = tuple(__UpperCAmelCase )
_snake_case : Dict = new_word
if len(__UpperCAmelCase ) == 1:
break
else:
_snake_case : List[str] = get_pairs(__UpperCAmelCase )
_snake_case : Union[str, Any] = """ """.join(__UpperCAmelCase )
if word == "\n " + BPE_TOKEN_MERGES:
_snake_case : Optional[Any] = """\n""" + BPE_TOKEN_MERGES
if word.endswith(__UpperCAmelCase ):
_snake_case : Tuple = word.replace(__UpperCAmelCase, """""" )
_snake_case : Tuple = word.replace(""" """, __UpperCAmelCase )
_snake_case : Any = word
return word
def UpperCamelCase_ ( self: Tuple, a_: str ):
    """Tokenize a string into BPE sub-word tokens.

    Raises ValueError when the tokenizer was loaded without a merges file
    (``self.bpe_ranks is None``), since BPE encoding is then impossible.

    Fixes vs. the previous version: intermediate results were assigned to
    ``_snake_case`` while later statements read ``text``/``split_tokens``
    (NameError), and the parameter was annotated ``int`` although it is
    clearly treated as a string (``.lower()``/``.split()``).
    """
    if self.bpe_ranks is None:
        raise ValueError(
            "This tokenizer was instantiated without a `merges.txt` file, so"
            " that it can only be used for decoding, not for encoding."
            "Make sure to provide `merges.txt` file at instantiation to enable "
            "encoding." )
    text = a_
    if self.do_lower_case:
        text = text.lower()
    text = text.split()
    split_tokens = []
    for token in text:
        if token:
            # bpe() returns a space-separated merge sequence; split it back out.
            split_tokens.extend(list(self.bpe(token).split(" ")))
    return split_tokens
def UpperCamelCase_ ( self: Union[str, Any], a_: str ):
    """Convert a token (str) to its id, falling back to the unk-token id.

    Fix vs. the previous version: the body looked up the undefined name
    ``__UpperCAmelCase`` instead of the parameter ``a_``.
    """
    return self.encoder.get(a_, self.encoder.get(self.unk_token))
def UpperCamelCase_ ( self: Any, a_: int ):
    """Convert an id (int) back to its token, falling back to the unk token.

    Fix vs. the previous version: the lookup used the undefined name
    ``__UpperCAmelCase`` and the result was bound to ``_snake_case`` while the
    return statement read ``result``.
    """
    result = self.decoder.get(a_, self.unk_token)
    return result
def UpperCamelCase_ ( self: Dict, a_: List[str] ):
    """Convert a sequence of BPE tokens into a single string.

    Fixes vs. the previous version: the joined string was bound to
    ``_snake_case`` while the next line read ``string`` (NameError), and
    ``string.split(...)`` was passed the token *list* itself, which raises
    TypeError — per the comment, the intent is to concatenate "@@ "
    continuation tokens (the upstream ``BPE_TOKEN_VOCAB`` marker).
    """
    string = " ".join(a_)
    # make sure @@ tokens are concatenated
    string = "".join(string.split("@@ "))
    return string
def UpperCamelCase_ ( self: Union[str, Any], save_directory: str, filename_prefix: Optional[str] = None ):
    """Save the vocabulary (JSON encoder) and, when present, the BPE merges file.

    Returns a 1-tuple ``(vocab_file,)`` when no merges table exists, otherwise
    ``(vocab_file, merges_file)``; returns None when ``save_directory`` is not
    a directory.

    Fixes vs. the previous version: both parameters were named ``a_``
    (duplicate-argument SyntaxError), the sort key ``lambda a_: kv[1]``
    referenced the undefined name ``kv``, and locals were assigned to
    ``_snake_case`` while later lines read ``save_directory``/``merges_file``/
    ``index``. ``sort_keys=True`` / ``ensure_ascii=False`` restore a stable,
    UTF-8-friendly vocab dump.
    """
    if not os.path.isdir(save_directory):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
        return
    vocab_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
    merges_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
    with open(vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
    index = 0
    if self.bpe_ranks is None:
        return (vocab_file,)
    with open(merges_file, "w", encoding="utf-8") as writer:
        # Merges are written in rank order; a gap in the rank indices means the
        # merge table is inconsistent, which we surface but do not fix.
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
            if index != token_index:
                logger.warning(
                    f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                    " Please check that the tokenizer is not corrupted!" )
                index = token_index
            writer.write(" ".join(bpe_tokens) + "\n")
            index += 1
    return (vocab_file, merges_file)
# Module-level imports and logger for the SpeechT5-style feature extractor below.
# (A "| 64 |" extraction artifact fused into the first import line was removed.)
import warnings
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging

# The feature extractor below calls `logger.warning(...)`, so the logger must be
# bound to the name `logger`; keep the old obfuscated alias for compatibility.
logger = logging.get_logger(__name__)
UpperCamelCase__ = logger
class A ( SequenceFeatureExtractor ):
    """
    SpeechT5-style speech feature extractor.

    Produces raw waveform ``input_values`` for the encoder (``audio``) and/or
    log-mel spectrogram targets for the decoder (``audio_target``).

    Fixes vs. the previous version: the base class was the undefined name
    ``UpperCAmelCase_`` (the imports provide ``SequenceFeatureExtractor``);
    every method declared all its parameters with the single name
    ``__UpperCAmelCase`` (duplicate-argument SyntaxError); four methods shared
    the name ``lowercase_`` (each overwriting the previous) although internal
    call sites use the real names ``_process_audio``, ``_extract_mel_features``
    and ``zero_mean_unit_var_norm``; and locals were assigned to
    ``UpperCAmelCase__`` while read under their real names. Parameter names and
    defaults below are reconstructed from the attribute assignments that were
    still visible in the original ``__init__``.
    """

    # Consumed by the SequenceFeatureExtractor base to know the model inputs.
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16_000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7_600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs: Any,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        # STFT geometry derived from the millisecond-based window/hop settings.
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Normalize each sequence to zero mean / unit variance over its unpadded length."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    # Re-impose the padding value on the padded tail.
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Compute a log10 mel spectrogram for one waveform; returns (frames, num_mel_bins)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs: Any,
    ) -> BatchFeature:
        """Featurize ``audio`` (waveform inputs) and/or ``audio_target`` (mel targets).

        When both are given, the target features are attached as ``labels`` /
        ``decoder_attention_mask`` on the input batch.
        """
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs: Any,
    ) -> BatchFeature:
        """Batch, (optionally) mel-transform, pad and normalize one audio input."""
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(one_speech, dtype=np.float32) for one_speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            input_values = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            input_values = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            input_values = input_values.astype(np.float32)
        padded_inputs["input_values"] = input_values

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the config, dropping attributes that are derived from the others."""
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
# NOTE(review): trailing extraction artifacts removed — "| 65 | 0 |" and
# web-UI residue ("Subsets and Splits", "No community queries yet", ...)
# were not part of this module's source.