"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (PNDMScheduler,)
SCREAMING_SNAKE_CASE_ = (('''num_inference_steps''', 50),)
def UpperCAmelCase__ ( self :Optional[int] , **lowerCamelCase_ :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] ={
'num_train_timesteps': 1_000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**A_ )
return config
def UpperCAmelCase__ ( self :Union[str, Any] , lowerCamelCase_ :Optional[int]=0 , **lowerCamelCase_ :Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : str =dict(self.forward_default_kwargs )
lowerCamelCase__ : List[Any] =kwargs.pop('num_inference_steps' , A_ )
lowerCamelCase__ : Dict =self.dummy_sample
lowerCamelCase__ : List[Any] =0.1 * sample
lowerCamelCase__ : str =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ : Tuple =self.get_scheduler_config(**A_ )
lowerCamelCase__ : Tuple =scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# copy over dummy past residuals
lowerCamelCase__ : Optional[int] =dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A_ )
lowerCamelCase__ : str =scheduler_class.from_pretrained(A_ )
new_scheduler.set_timesteps(A_ )
# copy over dummy past residuals
lowerCamelCase__ : int =dummy_past_residuals[:]
lowerCamelCase__ : Any =scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
lowerCamelCase__ : Any =new_scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCamelCase__ : Optional[Any] =scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
lowerCamelCase__ : Optional[int] =new_scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self :Any , lowerCamelCase_ :List[Any]=0 , **lowerCamelCase_ :List[str] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =dict(self.forward_default_kwargs )
lowerCamelCase__ : int =kwargs.pop('num_inference_steps' , A_ )
lowerCamelCase__ : Tuple =self.dummy_sample
lowerCamelCase__ : Union[str, Any] =0.1 * sample
lowerCamelCase__ : int =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ : Tuple =self.get_scheduler_config()
lowerCamelCase__ : Union[str, Any] =scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCamelCase__ : Any =dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A_ )
lowerCamelCase__ : str =scheduler_class.from_pretrained(A_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(A_ )
# copy over dummy past residual (must be after setting timesteps)
lowerCamelCase__ : Tuple =dummy_past_residuals[:]
lowerCamelCase__ : List[str] =scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
lowerCamelCase__ : List[str] =new_scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCamelCase__ : Tuple =scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
lowerCamelCase__ : str =new_scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self :Optional[Any] , **lowerCamelCase_ :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : Dict =self.scheduler_classes[0]
lowerCamelCase__ : Optional[int] =self.get_scheduler_config(**A_ )
lowerCamelCase__ : List[str] =scheduler_class(**A_ )
lowerCamelCase__ : Any =10
lowerCamelCase__ : List[str] =self.dummy_model()
lowerCamelCase__ : str =self.dummy_sample_deter
scheduler.set_timesteps(A_ )
for i, t in enumerate(scheduler.prk_timesteps ):
lowerCamelCase__ : Union[str, Any] =model(A_ , A_ )
lowerCamelCase__ : str =scheduler.step_prk(A_ , A_ , A_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
lowerCamelCase__ : Dict =model(A_ , A_ )
lowerCamelCase__ : Tuple =scheduler.step_plms(A_ , A_ , A_ ).prev_sample
return sample
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
lowerCamelCase__ : List[str] =dict(self.forward_default_kwargs )
lowerCamelCase__ : Optional[int] =kwargs.pop('num_inference_steps' , A_ )
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ : List[str] =self.get_scheduler_config()
lowerCamelCase__ : Optional[Any] =scheduler_class(**A_ )
lowerCamelCase__ : List[Any] =self.dummy_sample
lowerCamelCase__ : Any =0.1 * sample
if num_inference_steps is not None and hasattr(A_ , 'set_timesteps' ):
scheduler.set_timesteps(A_ )
elif num_inference_steps is not None and not hasattr(A_ , 'set_timesteps' ):
lowerCamelCase__ : Optional[int] =num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCamelCase__ : int =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowerCamelCase__ : str =dummy_past_residuals[:]
lowerCamelCase__ : Tuple =scheduler.step_prk(A_ , 0 , A_ , **A_ ).prev_sample
lowerCamelCase__ : str =scheduler.step_prk(A_ , 1 , A_ , **A_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowerCamelCase__ : Union[str, Any] =scheduler.step_plms(A_ , 0 , A_ , **A_ ).prev_sample
lowerCamelCase__ : int =scheduler.step_plms(A_ , 1 , A_ , **A_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=A_ )
def UpperCAmelCase__ ( self :Optional[int] ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=A_ )
lowerCamelCase__ : Dict =self.scheduler_classes[0]
lowerCamelCase__ : str =self.get_scheduler_config(steps_offset=1 )
lowerCamelCase__ : Optional[int] =scheduler_class(**A_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def UpperCAmelCase__ ( self :List[Any] ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=A_ )
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=A_ )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Dict =27
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ : int =self.dummy_sample
lowerCamelCase__ : Any =0.1 * sample
lowerCamelCase__ : List[str] =self.get_scheduler_config()
lowerCamelCase__ : Tuple =scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
lowerCamelCase__ : Optional[int] =scheduler.step_prk(A_ , A_ , A_ ).prev_sample
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
with self.assertRaises(A_ ):
lowerCamelCase__ : Dict =self.scheduler_classes[0]
lowerCamelCase__ : Optional[int] =self.get_scheduler_config()
lowerCamelCase__ : int =scheduler_class(**A_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
lowerCamelCase__ : Dict =self.full_loop()
lowerCamelCase__ : List[str] =torch.sum(torch.abs(A_ ) )
lowerCamelCase__ : List[Any] =torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 198.1_318 ) < 1e-2
assert abs(result_mean.item() - 0.25_80 ) < 1e-3
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
lowerCamelCase__ : str =self.full_loop(prediction_type='v_prediction' )
lowerCamelCase__ : int =torch.sum(torch.abs(A_ ) )
lowerCamelCase__ : List[str] =torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1e-2
assert abs(result_mean.item() - 0.08_78 ) < 1e-3
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : int =self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 )
lowerCamelCase__ : List[str] =torch.sum(torch.abs(A_ ) )
lowerCamelCase__ : List[str] =torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 230.0_399 ) < 1e-2
assert abs(result_mean.item() - 0.29_95 ) < 1e-3
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
lowerCamelCase__ : int =self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 )
lowerCamelCase__ : List[Any] =torch.sum(torch.abs(A_ ) )
lowerCamelCase__ : List[Any] =torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 186.9_482 ) < 1e-2
assert abs(result_mean.item() - 0.24_34 ) < 1e-3 | 126 |
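
# Usage sketch (illustrative, not part of the test file above): a minimal
# PRK + PLMS denoising loop with PNDMScheduler. `toy_model` is a hypothetical
# stand-in for a real UNet; the scheduler API itself is the one exercised above.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)

def toy_model(x, t):  # hypothetical model: "predicts" noise as 0.1 * x
    return 0.1 * x

for t in scheduler.prk_timesteps:
    sample = scheduler.step_prk(toy_model(sample, t), t, sample).prev_sample
for t in scheduler.plms_timesteps:
    sample = scheduler.step_plms(toy_model(sample, t), t, sample).prev_sample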
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[int]=0.999 , snake_case__ : Union[str, Any]="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ : Dict ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
A = []
for i in range(snake_case__ ):
A = i / num_diffusion_timesteps
A = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) )
return torch.tensor(snake_case__ , dtype=torch.floataa )
class lowerCAmelCase_ ( _lowercase , _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
_lowerCamelCase: Optional[Any] = 2
@register_to_config
def __init__( self : str ,A_ : int = 1000 ,A_ : float = 0.0_00_85 ,A_ : float = 0.0_12 ,A_ : str = "linear" ,A_ : Optional[Union[np.ndarray, List[float]]] = None ,A_ : str = "epsilon" ,A_ : Optional[bool] = False ,A_ : Optional[bool] = False ,A_ : float = 1.0 ,A_ : str = "linspace" ,A_ : int = 0 ,) -> List[str]:
if trained_betas is not None:
A = torch.tensor(A_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
A = torch.linspace(A_ ,A_ ,A_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,A_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A = betas_for_alpha_bar(A_ ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
A = betas_for_alpha_bar(A_ ,alpha_transform_type='exp' )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
A = 1.0 - self.betas
A = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(A_ ,A_ ,A_ )
A = use_karras_sigmas
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : Tuple=None ) -> Tuple:
if schedule_timesteps is None:
A = self.timesteps
A = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A = 1 if len(A_ ) > 1 else 0
else:
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
A = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : torch.FloatTensor ,A_ : Union[float, torch.FloatTensor] ,) -> torch.FloatTensor:
A = self.index_for_timestep(A_ )
A = self.sigmas[step_index]
A = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : Union[str, torch.device] = None ,A_ : Optional[int] = None ,) -> Optional[Any]:
A = num_inference_steps
A = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A = np.linspace(0 ,num_train_timesteps - 1 ,A_ ,dtype=A_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(0 ,A_ ) * step_ratio).round()[::-1].copy().astype(A_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(A_ ,0 ,-step_ratio )).round().copy().astype(A_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
A = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A = np.log(A_ )
A = np.interp(A_ ,np.arange(0 ,len(A_ ) ) ,A_ )
if self.config.use_karras_sigmas:
A = self._convert_to_karras(in_sigmas=A_ ,num_inference_steps=self.num_inference_steps )
A = np.array([self._sigma_to_t(A_ ,A_ ) for sigma in sigmas] )
A = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
A = torch.from_numpy(A_ ).to(device=A_ )
A = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A = torch.from_numpy(A_ )
A = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(A_ ).startswith('mps' ):
# mps does not support float64
A = timesteps.to(A_ ,dtype=torch.floataa )
else:
A = timesteps.to(device=A_ )
# empty dt and derivative
A = None
A = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A = defaultdict(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : List[str] ) -> Dict:
# get log sigma
A = np.log(A_ )
# get distribution
A = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A = low_idx + 1
A = log_sigmas[low_idx]
A = log_sigmas[high_idx]
# interpolate sigmas
A = (low - log_sigma) / (low - high)
A = np.clip(A_ ,0 ,1 )
# transform interpolation to time range
A = (1 - w) * low_idx + w * high_idx
A = t.reshape(sigma.shape )
return t
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : torch.FloatTensor ,A_ : int ) -> torch.FloatTensor:
A = in_sigmas[-1].item()
A = in_sigmas[0].item()
A = 7.0 # 7.0 is the value used in the paper
A = np.linspace(0 ,1 ,A_ )
A = sigma_min ** (1 / rho)
A = sigma_max ** (1 / rho)
A = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return self.dt is None
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : Union[float, torch.FloatTensor] ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : bool = True ,) -> Union[SchedulerOutput, Tuple]:
A = self.index_for_timestep(A_ )
# advance index counter by 1
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A = self.sigmas[step_index]
A = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A = self.sigmas[step_index - 1]
A = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A = 0
A = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A = sigma_hat if self.state_in_first_order else sigma_next
A = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A = sigma_hat if self.state_in_first_order else sigma_next
A = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A = model_output
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
A = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A = sigma_next - sigma_hat
# store for 2nd order step
A = derivative
A = dt
A = sample
else:
# 2. 2nd order / Heun's method
A = (sample - pred_original_sample) / sigma_next
A = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A = self.dt
A = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A = None
A = None
A = None
A = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(A_ ):
# mps does not support float64
A = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
A = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
A = self.timesteps.to(original_samples.device )
A = timesteps.to(original_samples.device )
A = [self.index_for_timestep(A_ ,A_ ) for t in timesteps]
A = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A = sigma.unsqueeze(-1 )
A = original_samples + noise * sigma
return noisy_samples
def __len__( self : Dict ) -> int:
return self.config.num_train_timesteps | 74 | 0 |
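
# Illustrative sketch (not part of the scheduler file above): the Karras et al.
# (2022) rho=7 sigma schedule that `_convert_to_karras` implements, computed
# standalone; sigma_min / sigma_max here are made-up demo values.
import numpy as np

sigma_min, sigma_max, rho = 0.1, 10.0, 7.0
ramp = np.linspace(0, 1, 10)  # 10 inference steps
min_inv_rho = sigma_min ** (1 / rho)
max_inv_rho = sigma_max ** (1 / rho)
karras_sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
print(karras_sigmas[0], karras_sigmas[-1])  # 10.0 ... 0.1, monotonically decreasing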
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
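
# Usage sketch (illustrative, not part of the builder file above): the builder
# backs `datasets.load_dataset("json", ...)`. A tiny JSON-Lines file is written
# first so the snippet is self-contained.
import json
import os
import tempfile

from datasets import load_dataset

tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, "data.jsonl")
with open(path, "w", encoding="utf-8") as f:
    for row in [{"text": "hello", "label": 0}, {"text": "world", "label": 1}]:
        f.write(json.dumps(row) + "\n")

ds = load_dataset("json", data_files=path, split="train")
print(ds.column_names)  # ['text', 'label']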
"""simple docstring"""
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ,A_ : list[int] ) -> None:
A = len(A_ )
A = [0] * len_array
if len_array > 0:
A = array[0]
for i in range(1 ,A_ ):
A = self.prefix_sum[i - 1] + array[i]
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ) -> bool:
A = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(A_ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 74 | 0 |
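
# Usage sketch (illustrative): O(1) range-sum queries after O(n) setup with the
# PrefixSum class above.
ps = PrefixSum([1, 2, 3, 4])
assert ps.get_sum(0, 3) == 10  # 1 + 2 + 3 + 4
assert ps.get_sum(1, 2) == 5   # 2 + 3
assert ps.contains_sum(7)      # the subarray [3, 4] sums to 7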
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Determine the missing one of the three carrier concentrations of a
    semiconductor from the other two, using the mass-action law
    electron_conc * hole_conc == intrinsic_conc ** 2.
    Exactly one argument must be 0 (the unknown to solve for).
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
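
# Usage sketch (illustrative): solving for the unknown concentration via the
# mass-action law n * p == n_i ** 2, using the function above.
print(carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0))
# -> ('intrinsic_conc', 50.0), since n_i = sqrt(25 * 100)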
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = RobertaPreLayerNormConfig.from_pretrained(
snake_case__ , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
A = torch.load(hf_hub_download(repo_id=snake_case__ , filename='pytorch_model.bin' ) )
A = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith('roberta.' ):
A = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
A = tensor_value
A = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ )
model.save_pretrained(snake_case__ )
# convert tokenizer
A = AutoTokenizer.from_pretrained(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 74 | 0 |
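
# Illustrative sketch of the key-renaming rule applied above, on a toy state
# dict (hypothetical tensor shapes; no checkpoint download required):
import torch

original = {"roberta.encoder.layer.0.attention.self.query.weight": torch.zeros(2, 2)}
converted = {}
for key, value in original.items():
    if key.startswith("roberta."):
        key = "roberta_prelayernorm." + key[len("roberta.") :]
    converted[key] = value
print(list(converted))  # ['roberta_prelayernorm.encoder.layer.0.attention.self.query.weight']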
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''roformer'''
def __init__( self : Tuple ,A_ : Optional[int]=5_0000 ,A_ : Tuple=None ,A_ : Optional[Any]=768 ,A_ : Dict=12 ,A_ : Optional[int]=12 ,A_ : Union[str, Any]=3072 ,A_ : Dict="gelu" ,A_ : Dict=0.1 ,A_ : List[Any]=0.1 ,A_ : List[Any]=1536 ,A_ : List[str]=2 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : Optional[int]=0 ,A_ : List[str]=False ,A_ : Tuple=True ,**A_ : List[str] ,) -> Dict:
super().__init__(pad_token_id=A_ ,**A_ )
A = vocab_size
A = hidden_size if embedding_size is None else embedding_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = rotary_value
A = use_cache
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 74 | 0 |
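
# Usage sketch (illustrative): constructing the config directly. Defaults
# follow the class above; `rotary_value` controls whether rotary position
# embeddings are also applied to the value projections.
from transformers import RoFormerConfig

config = RoFormerConfig(vocab_size=50000, max_position_embeddings=1536)
print(config.hidden_size, config.num_hidden_layers, config.rotary_value)  # 768 12 False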
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase__( _lowercase , unittest.TestCase ):
lowerCAmelCase__ : Tuple = GPTaTokenizer
lowerCAmelCase__ : Tuple = GPTaTokenizerFast
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Any = {'''add_prefix_space''': True}
lowerCAmelCase__ : List[Any] = False
def snake_case__ ( self ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
A__ = dict(zip(A_ ,range(len(A_ ) ) ) )
A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(A_ ) )
def snake_case__ ( self ,**__UpperCAmelCase ) -> Any:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname ,**A_ )
def snake_case__ ( self ,**__UpperCAmelCase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname ,**A_ )
def snake_case__ ( self ,__UpperCAmelCase ) -> Union[str, Any]:
A__ = 'lower newer'
A__ = 'lower newer'
return input_text, output_text
def snake_case__ ( self ) -> List[Any]:
A__ = GPTaTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
A__ = 'lower newer'
A__ = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
A__ = tokenizer.tokenize(A_ ,add_prefix_space=A_ )
self.assertListEqual(A_ ,A_ )
A__ = tokens + [tokenizer.unk_token]
A__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ )
def snake_case__ ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer(add_prefix_space=A_ )
A__ = 'lower newer'
# Testing tokenization
A__ = tokenizer.tokenize(A_ ,add_prefix_space=A_ )
A__ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ ,A_ )
# Testing conversion to ids without special tokens
A__ = tokenizer.encode(A_ ,add_special_tokens=A_ ,add_prefix_space=A_ )
A__ = rust_tokenizer.encode(A_ ,add_special_tokens=A_ )
self.assertListEqual(A_ ,A_ )
# Testing conversion to ids with special tokens
A__ = self.get_rust_tokenizer(add_prefix_space=A_ )
A__ = tokenizer.encode(A_ ,add_prefix_space=A_ )
A__ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ ,A_ )
# Testing the unknown token
A__ = tokens + [rust_tokenizer.unk_token]
A__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) ,A_ )
def snake_case__ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def snake_case__ ( self ,__UpperCAmelCase=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ )
# Simple input
A__ = 'This is a simple input'
A__ = ['This is a simple input 1', 'This is a simple input 2']
A__ = ('This is a simple input', 'This is a pair')
A__ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(A_ ,tokenizer_r.encode ,A_ ,max_length=A_ ,padding='max_length' )
# Simple input
self.assertRaises(A_ ,tokenizer_r.encode_plus ,A_ ,max_length=A_ ,padding='max_length' )
# Simple input
self.assertRaises(
A_ ,tokenizer_r.batch_encode_plus ,A_ ,max_length=A_ ,padding='max_length' ,)
# Pair input
self.assertRaises(A_ ,tokenizer_r.encode ,A_ ,max_length=A_ ,padding='max_length' )
# Pair input
self.assertRaises(A_ ,tokenizer_r.encode_plus ,A_ ,max_length=A_ ,padding='max_length' )
# Pair input
self.assertRaises(
A_ ,tokenizer_r.batch_encode_plus ,A_ ,max_length=A_ ,padding='max_length' ,)
def snake_case__ ( self ) -> List[Any]:
A__ = GPTaTokenizer.from_pretrained(self.tmpdirname ,pad_token='<pad>' )
# Simple input
A__ = 'This is a simple input'
A__ = ['This is a simple input looooooooong', 'This is a simple input']
A__ = ('This is a simple input', 'This is a pair')
A__ = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
A__ = tokenizer.pad_token_id
A__ = tokenizer(A_ ,padding='max_length' ,max_length=30 ,return_tensors='np' )
A__ = tokenizer(A_ ,padding=A_ ,truncate=A_ ,return_tensors='np' )
A__ = tokenizer(*A_ ,padding='max_length' ,max_length=60 ,return_tensors='np' )
A__ = tokenizer(A_ ,padding=A_ ,truncate=A_ ,return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def snake_case__ ( self ) -> Optional[int]:
A__ = '$$$'
A__ = GPTaTokenizer.from_pretrained(self.tmpdirname ,bos_token=A_ ,add_bos_token=A_ )
A__ = 'This is a simple input'
A__ = ['This is a simple input 1', 'This is a simple input 2']
A__ = tokenizer.bos_token_id
A__ = tokenizer(A_ )
A__ = tokenizer(A_ )
self.assertEqual(out_s.input_ids[0] ,A_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
A__ = tokenizer.decode(out_s.input_ids )
A__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,A_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def snake_case__ ( self ) -> int:
pass
def snake_case__ ( self ) -> Union[str, Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
A__ = [self.get_tokenizer(do_lower_case=A_ ,add_bos_token=A_ )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
A__ = 'Encode this.'
A__ = 'This one too please.'
A__ = tokenizer.encode(A_ ,add_special_tokens=A_ )
encoded_sequence += tokenizer.encode(A_ ,add_special_tokens=A_ )
A__ = tokenizer.encode_plus(
A_ ,A_ ,add_special_tokens=A_ ,return_special_tokens_mask=A_ ,)
A__ = encoded_sequence_dict['input_ids']
A__ = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(A_ ) ,len(A_ ) )
A__ = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ )
]
A__ = [x for x in filtered_sequence if x is not None]
self.assertEqual(A_ ,A_ )
@require_tokenizers
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> str:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
A__ = AutoTokenizer.from_pretrained('facebook/opt-350m' ,from_slow=A_ )
A__ = 'A photo of a cat'
A__ = tokenizer.encode(
A_ ,)
self.assertEqual(A_ ,[2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('test_opt' )
A__ = AutoTokenizer.from_pretrained('./test_opt' )
A__ = tokenizer.encode(
A_ ,)
self.assertEqual(A_ ,[2, 2_50, 13_45, 9, 10, 47_58] )
def snake_case__ ( self ) -> Optional[Any]:
A__ = AutoTokenizer.from_pretrained('facebook/opt-350m' ,use_slow=A_ )
A__ = 'A photo of a cat'
A__ = tokenizer.encode(
A_ ,)
# Same as above
self.assertEqual(A_ ,[2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def snake_case__ ( self ) -> Dict:
A__ = AutoTokenizer.from_pretrained('facebook/opt-350m' ,from_slow=A_ )
A__ = 'bos'
A__ = tokenizer.get_vocab()['bos']
A__ = 'A photo of a cat'
A__ = tokenizer.encode(
A_ ,)
# We changed the bos token
self.assertEqual(A_ ,[3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('./tok' )
A__ = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
A__ = tokenizer.encode(
A_ ,)
self.assertEqual(A_ ,[3_19_57, 2_50, 13_45, 9, 10, 47_58] )
| 221 |
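
# Illustrative sketch of the byte-level BPE behavior the tests above exercise,
# using the public "gpt2" checkpoint (tokenizer files download on first use):
from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2")
tokens = tok.tokenize("lower newer")
print(tokens)  # BPE pieces; '\u0120' ('Ġ') marks a word-initial space
print(tok.convert_tokens_to_ids(tokens))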
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _snake_case ( snake_case__ : Dict ):
A = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def _snake_case ( snake_case__ : int ):
A , A = emb.weight.shape
A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
A = emb.weight.data
return lin_layer
def _snake_case ( snake_case__ : List[str] , snake_case__ : Any="facebook/mbart-large-en-ro" , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False ):
A = torch.load(snake_case__ , map_location='cpu' )['model']
remove_ignore_keys_(snake_case__ )
A = state_dict['encoder.embed_tokens.weight'].shape[0]
A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ )
if mbart_aa and finetuned:
A = 'relu'
A = state_dict['decoder.embed_tokens.weight']
A = MBartForConditionalGeneration(snake_case__ )
model.model.load_state_dict(snake_case__ )
if finetuned:
A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
_lowercase = parser.parse_args()
_lowercase = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path) | 74 | 0 |
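
# Illustrative sketch of the weight tying that make_linear_from_emb performs,
# on toy sizes (no fairseq checkpoint needed):
import torch
from torch import nn

emb = nn.Embedding(10, 4)               # vocab_size=10, emb_size=4
lm_head = nn.Linear(4, 10, bias=False)  # hidden -> vocab logits
lm_head.weight.data = emb.weight.data   # (vocab, emb) matches nn.Linear's (out, in)
logits = lm_head(torch.randn(2, 4))
print(logits.shape)  # torch.Size([2, 10])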
"""simple docstring"""
import logging
import os
from .state import PartialState
class _UpperCAmelCase ( logging.LoggerAdapter ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE (a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def SCREAMING_SNAKE_CASE (self , a_ , a_ , *a_ , **a_ ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
__snake_case : Optional[Any] = kwargs.pop('''main_process_only''' , A_ )
__snake_case : Optional[Any] = kwargs.pop('''in_order''' , A_ )
if self.isEnabledFor(A_ ):
if self._should_log(A_ ):
__snake_case , __snake_case : Dict = self.process(A_ , A_ )
self.logger.log(A_ , A_ , *A_ , **A_ )
elif in_order:
__snake_case : Dict = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__snake_case , __snake_case : str = self.process(A_ , A_ )
self.logger.log(A_ , A_ , *A_ , **A_ )
state.wait_for_everyone()
def lowercase ( _snake_case : str , _snake_case : str = None ) ->Union[str, Any]:
"""simple docstring"""
if log_level is None:
__snake_case : Tuple = os.environ.get('''ACCELERATE_LOG_LEVEL''' , snake_case__ )
__snake_case : Tuple = logging.getLogger(snake_case__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(snake_case__ , {} )
| 102 |
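
# Usage sketch (illustrative): the `main_process_only` / `in_order` kwargs are
# consumed by MultiProcessAdapter.log above; PartialState must be initialized
# first, e.g. by constructing an Accelerator.
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes the shared PartialState
logger = get_logger(__name__)
logger.info("logged once, on the main process", main_process_only=True)
logger.info("logged by every rank, in rank order", main_process_only=False, in_order=True)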
"""simple docstring"""
import argparse
import struct
import unittest
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Tuple ,A_ : bytes ) -> None:
A = data
# Initialize hash values
A = [
0X6_A_0_9_E_6_6_7,
0XB_B_6_7_A_E_8_5,
0X3_C_6_E_F_3_7_2,
0XA_5_4_F_F_5_3_A,
0X5_1_0_E_5_2_7_F,
0X9_B_0_5_6_8_8_C,
0X1_F_8_3_D_9_A_B,
0X5_B_E_0_C_D_1_9,
]
# Initialize round constants
A = [
0X4_2_8_A_2_F_9_8,
0X7_1_3_7_4_4_9_1,
0XB_5_C_0_F_B_C_F,
0XE_9_B_5_D_B_A_5,
0X3_9_5_6_C_2_5_B,
0X5_9_F_1_1_1_F_1,
0X9_2_3_F_8_2_A_4,
0XA_B_1_C_5_E_D_5,
0XD_8_0_7_A_A_9_8,
0X1_2_8_3_5_B_0_1,
0X2_4_3_1_8_5_B_E,
0X5_5_0_C_7_D_C_3,
0X7_2_B_E_5_D_7_4,
0X8_0_D_E_B_1_F_E,
0X9_B_D_C_0_6_A_7,
0XC_1_9_B_F_1_7_4,
0XE_4_9_B_6_9_C_1,
0XE_F_B_E_4_7_8_6,
0X0_F_C_1_9_D_C_6,
0X2_4_0_C_A_1_C_C,
0X2_D_E_9_2_C_6_F,
0X4_A_7_4_8_4_A_A,
0X5_C_B_0_A_9_D_C,
0X7_6_F_9_8_8_D_A,
0X9_8_3_E_5_1_5_2,
0XA_8_3_1_C_6_6_D,
0XB_0_0_3_2_7_C_8,
0XB_F_5_9_7_F_C_7,
0XC_6_E_0_0_B_F_3,
0XD_5_A_7_9_1_4_7,
0X0_6_C_A_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_B_7_0_A_8_5,
0X2_E_1_B_2_1_3_8,
0X4_D_2_C_6_D_F_C,
0X5_3_3_8_0_D_1_3,
0X6_5_0_A_7_3_5_4,
0X7_6_6_A_0_A_B_B,
0X8_1_C_2_C_9_2_E,
0X9_2_7_2_2_C_8_5,
0XA_2_B_F_E_8_A_1,
0XA_8_1_A_6_6_4_B,
0XC_2_4_B_8_B_7_0,
0XC_7_6_C_5_1_A_3,
0XD_1_9_2_E_8_1_9,
0XD_6_9_9_0_6_2_4,
0XF_4_0_E_3_5_8_5,
0X1_0_6_A_A_0_7_0,
0X1_9_A_4_C_1_1_6,
0X1_E_3_7_6_C_0_8,
0X2_7_4_8_7_7_4_C,
0X3_4_B_0_B_C_B_5,
0X3_9_1_C_0_C_B_3,
0X4_E_D_8_A_A_4_A,
0X5_B_9_C_C_A_4_F,
0X6_8_2_E_6_F_F_3,
0X7_4_8_F_8_2_E_E,
0X7_8_A_5_6_3_6_F,
0X8_4_C_8_7_8_1_4,
0X8_C_C_7_0_2_0_8,
0X9_0_B_E_F_F_F_A,
0XA_4_5_0_6_C_E_B,
0XB_E_F_9_A_3_F_7,
0XC_6_7_1_7_8_F_2,
]
A = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : bytes ) -> bytes:
A = B'\x80' + (B'\x00' * (63 - (len(A_ ) + 8) % 64))
A = struct.pack('>Q' ,(len(A_ ) * 8) )
return data + padding + big_endian_integer
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
# Convert into blocks of 64 bytes
A = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A = list(struct.unpack('>16L' ,A_ ) )
# add 48 0-ed integers
words += [0] * 48
A , A , A , A , A , A , A , A = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
A = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
A = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
A = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_0_0_0_0_0_0_0_0
# Compression
A = self.ror(A_ ,6 ) ^ self.ror(A_ ,11 ) ^ self.ror(A_ ,25 )
A = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g)
A = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0_0_0_0_0_0_0_0
A = self.ror(A_ ,2 ) ^ self.ror(A_ ,13 ) ^ self.ror(A_ ,22 )
A = (a & b) ^ (a & c) ^ (b & c)
A = (sa + maj) % 0X1_0_0_0_0_0_0_0_0
A , A , A , A , A , A , A , A = (
g,
f,
e,
((d + tempa) % 0X1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0),
)
A = [a, b, c, d, e, f, g, h]
# Modify final values
A = [
((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
A = ''.join([hex(A_ )[2:].zfill(8 ) for value in self.hashes] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : int ) -> int:
return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> None:
import hashlib
A = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(A_ ).hash ,hashlib.shaaaa(A_ ).hexdigest() )
def _snake_case ( ):
import doctest
doctest.testmod()
A = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
A = parser.parse_args()
A = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
A = f.read()
else:
A = bytes(snake_case__ , 'utf-8' )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
main() | 74 | 0 |
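
# Verification sketch (illustrative): the pure-Python SHA256 class above
# should agree with hashlib on arbitrary byte strings.
import hashlib

for msg in [b"", b"abc", b"Test String"]:
    assert SHA256(msg).hash == hashlib.sha256(msg).hexdigest()
print("pure-Python SHA-256 matches hashlib")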
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
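
# Illustrative sketch: _hash_python_lines drops comment-only lines before
# hashing, so sources differing only in such lines share a cache hash.
assert _hash_python_lines(["# a comment", "x = 1"]) == _hash_python_lines(["x = 1"])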
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowercase = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''DeiTFeatureExtractor''']
_lowercase = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 74 | 0 |
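
# Usage sketch (illustrative): because of the _LazyModule registration above,
# importing a DeiT symbol from `transformers` only triggers the heavy
# submodule (and torch/TF) import at attribute-access time.
from transformers import DeiTConfig

config = DeiTConfig()
print(config.model_type)  # 'deit'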
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if key.startswith('''module.encoder''' ):
UpperCamelCase__ : Dict = key.replace('''module.encoder''' , '''glpn.encoder''' )
if key.startswith('''module.decoder''' ):
UpperCamelCase__ : Any = key.replace('''module.decoder''' , '''decoder.stages''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCamelCase__ : Union[str, Any] = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
UpperCamelCase__ : List[str] = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(snake_case__ )-1}" )
if "norm" in key:
UpperCamelCase__ : str = key.replace('''norm''' , '''layer_norm''' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCamelCase__ : Dict = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
UpperCamelCase__ : List[Any] = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(snake_case__ )-1}" )
if "layer_norm1" in key:
UpperCamelCase__ : Dict = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
UpperCamelCase__ : List[str] = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
UpperCamelCase__ : int = key[key.find('''block''' ) + len('''block''' )]
UpperCamelCase__ : List[str] = key.replace(F"block{idx}" , F"block.{int(snake_case__ )-1}" )
if "attn.q" in key:
UpperCamelCase__ : List[Any] = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
UpperCamelCase__ : Any = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
UpperCamelCase__ : str = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
UpperCamelCase__ : Union[str, Any] = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
UpperCamelCase__ : Dict = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
UpperCamelCase__ : Optional[Any] = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
UpperCamelCase__ : Optional[int] = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
UpperCamelCase__ : Any = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCamelCase__ : Any = key[key.find('''linear_c''' ) + len('''linear_c''' )]
UpperCamelCase__ : List[Any] = key.replace(F"linear_c{idx}" , F"linear_c.{int(snake_case__ )-1}" )
if "bot_conv" in key:
UpperCamelCase__ : Union[str, Any] = key.replace('''bot_conv''' , '''0.convolution''' )
if "skip_conv1" in key:
UpperCamelCase__ : List[Any] = key.replace('''skip_conv1''' , '''1.convolution''' )
if "skip_conv2" in key:
UpperCamelCase__ : str = key.replace('''skip_conv2''' , '''2.convolution''' )
if "fusion1" in key:
UpperCamelCase__ : str = key.replace('''fusion1''' , '''1.fusion''' )
if "fusion2" in key:
UpperCamelCase__ : Any = key.replace('''fusion2''' , '''2.fusion''' )
if "fusion3" in key:
UpperCamelCase__ : Any = key.replace('''fusion3''' , '''3.fusion''' )
if "fusion" in key and "conv" in key:
UpperCamelCase__ : List[str] = key.replace('''conv''' , '''convolutional_layer''' )
if key.startswith('''module.last_layer_depth''' ):
UpperCamelCase__ : List[str] = key.replace('''module.last_layer_depth''' , '''head.head''' )
UpperCamelCase__ : Union[str, Any] = value
return new_state_dict
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCamelCase__ : Optional[Any] = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.weight" )
UpperCamelCase__ : Optional[Any] = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.bias" )
# next, add keys and values (in that order) to the state dict
UpperCamelCase__ : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
UpperCamelCase__ : int = kv_bias[: config.hidden_sizes[i]]
UpperCamelCase__ : str = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCamelCase__ : str = kv_bias[config.hidden_sizes[i] :]
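# Example of the split performed above: with config.hidden_sizes[i] == 64 the fused
# kv.weight has shape (128, 64); rows 0..63 become the key projection and rows
# 64..127 the value projection, and kv.bias is sliced the same way.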
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase__ : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return image
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
"""simple docstring"""
UpperCamelCase__ : int = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
UpperCamelCase__ : Union[str, Any] = GLPNImageProcessor()
# prepare image
UpperCamelCase__ : str = prepare_img()
UpperCamelCase__ : List[Any] = image_processor(images=snake_case__ , return_tensors='''pt''' ).pixel_values
logger.info('''Converting model...''' )
# load original state dict
UpperCamelCase__ : List[Any] = torch.load(snake_case__ , map_location=torch.device('''cpu''' ) )
# rename keys
UpperCamelCase__ : Tuple = rename_keys(snake_case__ )
# key and value matrices need special treatment
read_in_k_v(snake_case__ , snake_case__ )
# create HuggingFace model and load state dict
UpperCamelCase__ : Union[str, Any] = GLPNForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# forward pass
UpperCamelCase__ : str = model(snake_case__ )
UpperCamelCase__ : List[Any] = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
UpperCamelCase__ : List[Any] = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
UpperCamelCase__ : int = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(F"Unknown model name: {model_name}" )
UpperCamelCase__ : Optional[Any] = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , snake_case__ , atol=1E-4 )
print('''Looks ok!''' )
# finally, push to hub if required
if push_to_hub:
logger.info('''Pushing model and image processor to the hub...''' )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=snake_case__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=snake_case__ , )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you\'re pushing to the hub.",
)
__UpperCamelCase : List[str] = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
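# A minimal usage sketch for the conversion script above; the script filename and
# local paths are hypothetical examples, not files shipped alongside this code:
#
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti-hf \
#       --model_name glpn-kitti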
| 146 |
"""simple docstring"""
from __future__ import annotations
import requests
def _snake_case ( snake_case__ : str ):
A = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(snake_case__ ).json()
def _snake_case ( snake_case__ : int = 10 ):
A = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
A = requests.get(snake_case__ ).json()[:max_stories]
return [get_hackernews_story(snake_case__ ) for story_id in story_ids]
def _snake_case ( snake_case__ : int = 10 ):
A = hackernews_top_stories(snake_case__ )
return "\n".join('* [{title}]({url})'.format(**snake_case__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown()) | 74 | 0 |
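# A short usage sketch for the Hacker News helpers above; titles and URLs naturally
# depend on the live API at call time:
#
#   stories = hackernews_top_stories(max_stories=3)    # three dicts with 'title', 'url', ...
#   print(hackernews_top_stories_as_markdown(3))       # one "* [title](url)" line per story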
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCAmelCase__ = bs[:]
UpperCAmelCase__ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case__ )
cs.append(2**8 + n )
n += 1
UpperCAmelCase__ = [chr(snake_case__ ) for n in cs]
return dict(zip(snake_case__ , snake_case__ ) )
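# Worked example of the byte-to-unicode table built above: bytes in the three kept
# ranges map to themselves (ord('!') == 33 -> '!'), while the excluded bytes are
# shifted past 255, e.g. byte 0 -> chr(256) == 'Ā' and the space byte 32 -> 'Ġ'.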
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ = set()
UpperCAmelCase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase__ = char
return pairs
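# Example: for word == ('h', 'e', 'l', 'l', 'o') the helper above returns the symbol
# pairs {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}, which the BPE loop below
# ranks against the learned merges.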
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
lowerCAmelCase_ : Dict = VOCAB_FILES_NAMES
lowerCAmelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Dict = ['''input_ids''', '''attention_mask''']
def __init__( self : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int="replace" , _UpperCAmelCase : List[str]="<s>" , _UpperCAmelCase : List[Any]="</s>" , _UpperCAmelCase : Optional[Any]="</s>" , _UpperCAmelCase : List[str]="<s>" , _UpperCAmelCase : int="<unk>" , _UpperCAmelCase : str="<pad>" , _UpperCAmelCase : Union[str, Any]="<mask>" , _UpperCAmelCase : int=False , **_UpperCAmelCase : str , ):
"""simple docstring"""
UpperCAmelCase__ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
UpperCAmelCase__ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
UpperCAmelCase__ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
UpperCAmelCase__ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
UpperCAmelCase__ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
UpperCAmelCase__ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
UpperCAmelCase__ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase__ = json.load(A_ )
UpperCAmelCase__ = {v: k for k, v in self.encoder.items()}
UpperCAmelCase__ = errors # how to handle errors in decoding
UpperCAmelCase__ = bytes_to_unicode()
UpperCAmelCase__ = {v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase__ = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase__ = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase__ = dict(zip(A_ , range(len(A_ ) ) ) )
UpperCAmelCase__ = {}
UpperCAmelCase__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase__ = re.compile(r"""\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
return len(self.encoder )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ = tuple(A_ )
UpperCAmelCase__ = get_pairs(A_ )
if not pairs:
return token
while True:
UpperCAmelCase__ = min(A_ , key=lambda _UpperCAmelCase : self.bpe_ranks.get(A_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ = bigram
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
while i < len(A_ ):
try:
UpperCAmelCase__ = word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase__ = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase__ = tuple(A_ )
UpperCAmelCase__ = new_word
if len(A_ ) == 1:
break
else:
UpperCAmelCase__ = get_pairs(A_ )
UpperCAmelCase__ = """ """.join(A_ )
UpperCAmelCase__ = word
return word
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : int ):
"""simple docstring"""
UpperCAmelCase__ = []
for token in re.findall(self.pat , A_ ):
UpperCAmelCase__ = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(""" """ ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Dict ):
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ):
"""simple docstring"""
return self.decoder.get(A_ )
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = """""".join(A_ )
UpperCAmelCase__ = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase__ = os.path.join(
A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ = os.path.join(
A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(A_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + """\n""" )
UpperCAmelCase__ = 0
with open(A_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase__ = token_index
writer.write(""" """.join(A_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=False , **_UpperCAmelCase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
UpperCAmelCase__ = """ """ + text
return (text, kwargs)
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : "Conversation" ):
"""simple docstring"""
UpperCAmelCase__ = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done within Blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(A_ )
UpperCAmelCase__ = """ """.join(A_ )
UpperCAmelCase__ = self.encode(A_ )
if len(A_ ) > self.model_max_length:
UpperCAmelCase__ = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
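# A minimal usage sketch, assuming the obfuscated class above corresponds to
# transformers' BlenderbotTokenizer with its original method names:
#
#   tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   ids = tok(" Hello there!").input_ids   # Blenderbot expects a leading space prefix
#   print(tok.decode(ids))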
| 346 |
"""simple docstring"""
from string import ascii_uppercase
_lowercase = {char: i for i, char in enumerate(ascii_uppercase)}
_lowercase = dict(enumerate(ascii_uppercase))
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = len(snake_case__ )
A = 0
while True:
if x == i:
A = 0
if len(snake_case__ ) == len(snake_case__ ):
break
key += key[i]
i += 1
return key
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = ''
A = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
A = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = ''
A = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
A = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def _snake_case ( ):
A = 'THE GERMAN ATTACK'
A = 'SECRET'
A = generate_key(snake_case__ , snake_case__ )
A = cipher_text(snake_case__ , snake_case__ )
print(F'Encrypted Text = {s}' )
print(F'Original Text = {original_text(snake_case__ , snake_case__ )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 74 | 0 |
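# Worked example for the cipher above: with message "THE GERMAN ATTACK" and key
# "SECRET", generate_key repeats the key cyclically to the message length, and each
# letter is shifted by its paired key letter modulo 26, so the first character is
# ('T' - 'S') % 26 == 1 -> 'B'; original_text applies the inverse shift to decrypt.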
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowercase_ = TypeVar('T')
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
return (position - 1) // 2
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
return (2 * position) + 1
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
return (2 * position) + 2
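# Example of the 0-indexed heap arithmetic above: the node at position 4 has parent
# (4 - 1) // 2 == 1, and position 1 in turn has children 2 * 1 + 1 == 3 and
# 2 * 1 + 2 == 4.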
class snake_case ( Generic[T] ):
'''simple docstring'''
def __init__( self : Optional[Any] ):
'''simple docstring'''
__A = []
__A = {}
__A = 0
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.elements
def __repr__( self : Optional[Any] ):
'''simple docstring'''
return str(self.heap )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
# Check if the priority queue is empty
return self.elements == 0
def _SCREAMING_SNAKE_CASE ( self : Optional[int], _lowerCamelCase : T, _lowerCamelCase : int ):
'''simple docstring'''
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
__A = self.elements
self.elements += 1
self._bubble_up(A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0, self.elements - 1 )
__A , __A = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__A , __A = self.heap[0]
self._bubble_down(A_ )
return elem
def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : T, _lowerCamelCase : int ):
'''simple docstring'''
# Update the weight of the given key
__A = self.position_map[elem]
__A = (elem, weight)
if position > 0:
__A = get_parent_position(A_ )
__A , __A = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(A_ )
else:
self._bubble_down(A_ )
else:
self._bubble_down(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : T ):
'''simple docstring'''
# Place a node at the proper position (upward movement) [to be used internally
# only]
__A = self.position_map[elem]
if curr_pos == 0:
return None
__A = get_parent_position(A_ )
__A , __A = self.heap[curr_pos]
__A , __A = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(A_, A_ )
return self._bubble_up(A_ )
return None
def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : T ):
'''simple docstring'''
# Place a node at the proper position (downward movement) [to be used
# internally only]
__A = self.position_map[elem]
__A , __A = self.heap[curr_pos]
__A = get_child_left_position(A_ )
__A = get_child_right_position(A_ )
if child_left_position < self.elements and child_right_position < self.elements:
__A , __A = self.heap[child_left_position]
__A , __A = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(A_, A_ )
return self._bubble_down(A_ )
if child_left_position < self.elements:
__A , __A = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(A_, A_ )
return self._bubble_down(A_ )
else:
return None
if child_right_position < self.elements:
__A , __A = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(A_, A_ )
return self._bubble_down(A_ )
return None
def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : int, _lowerCamelCase : int ):
'''simple docstring'''
# Swap the nodes at the given positions
__A = self.heap[nodea_pos][0]
__A = self.heap[nodea_pos][0]
__A , __A = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__A = nodea_pos
__A = nodea_pos
class snake_case ( Generic[T] ):
'''simple docstring'''
def __init__( self : Union[str, Any] ):
'''simple docstring'''
__A = {}
__A = 0
def __repr__( self : Tuple ):
'''simple docstring'''
return str(self.connections )
def __len__( self : str ):
'''simple docstring'''
return self.nodes
def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : T ):
'''simple docstring'''
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__A = {}
self.nodes += 1
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : T, _lowerCamelCase : T, _lowerCamelCase : int ):
'''simple docstring'''
# Add an edge between 2 nodes in the graph
self.add_node(A_ )
self.add_node(A_ )
__A = weight
__A = weight
def lowerCAmelCase ( __UpperCamelCase , ):
"""simple docstring"""
__A = {node: maxsize for node in graph.connections}
__A = {node: None for node in graph.connections}
__A = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(snake_case__ , snake_case__ )
if priority_queue.is_empty():
return dist, parent
# initialization
__A = priority_queue.extract_min()
__A = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__A = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(snake_case__ , dist[neighbour] )
__A = node
# running prim's algorithm
while not priority_queue.is_empty():
__A = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__A = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(snake_case__ , dist[neighbour] )
__A = node
return dist, parent
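# A minimal usage sketch for Prim's algorithm above. Note that in this dump both
# graph classes carry the name `snake_case` and the helpers plus the algorithm all
# carry `lowerCAmelCase`, so the sketch assumes the original names:
#
#   graph: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
#   graph.add_edge("a", "b", 3)
#   graph.add_edge("b", "c", 10)
#   graph.add_edge("a", "c", 15)
#   dist, parent = prims_algo(graph)   # parent links encode the minimum spanning tree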
| 266 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ) -> Optional[Any]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
A = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
A = 'sgugger/tiny-distilbert-classification'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,only_pretrain_model=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,torchscript=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,fpaa=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
# set architectures equal to `None`
A = None
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=A_ ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = 'sshleifer/tinier_bart'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = 'sshleifer/tinier_bart'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
A = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,save_to_csv=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(A_ ,'inf_time.csv' ) ,train_memory_csv_file=os.path.join(A_ ,'train_mem.csv' ) ,inference_memory_csv_file=os.path.join(A_ ,'inf_mem.csv' ) ,train_time_csv_file=os.path.join(A_ ,'train_time.csv' ) ,env_info_csv_file=os.path.join(A_ ,'env.csv' ) ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
benchmark.run()
self.assertTrue(Path(os.path.join(A_ ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'env.csv' ) ).exists() )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(A_ : Optional[int] ):
self.assertTrue(hasattr(A_ ,'sequential' ) )
self.assertTrue(hasattr(A_ ,'cumulative' ) )
self.assertTrue(hasattr(A_ ,'current' ) )
self.assertTrue(hasattr(A_ ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(A_ ,'log.txt' ) ,log_print=A_ ,trace_memory_line_by_line=A_ ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A_ ,'log.txt' ) ).exists() ) | 74 | 0 |
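# A stand-alone sketch of the benchmark API exercised by the tests above, using the
# same tiny fixture model and the flag values the obfuscated fixtures presumably pass:
#
#   args = PyTorchBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = PyTorchBenchmark(args).run()
#   print(results.time_inference_result)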
class a__ :
"""simple docstring"""
def __init__( self , lowercase ) -> Union[str, Any]:
'''simple docstring'''
A__ = n
A__ = [None] * self.n
A__ = 0 # index of the first element
A__ = 0
A__ = 0
def __len__( self ) -> int:
'''simple docstring'''
return self.size
def UpperCamelCase ( self ) -> bool:
'''simple docstring'''
return self.size == 0
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
return False if self.is_empty() else self.array[self.front]
def UpperCamelCase ( self , lowercase ) -> int:
'''simple docstring'''
if self.size >= self.n:
raise Exception("QUEUE IS FULL" )
A__ = data
A__ = (self.rear + 1) % self.n
self.size += 1
return self
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
if self.size == 0:
raise Exception("UNDERFLOW" )
A__ = self.array[self.front]
A__ = None
A__ = (self.front + 1) % self.n
self.size -= 1
return temp
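# Usage sketch for the fixed-capacity circular queue above; the class and its public
# methods are obfuscated in this dump (`a__` / `UpperCamelCase`), so the sketch
# assumes the original names (CircularQueue, is_empty, first, enqueue, dequeue):
#
#   q = CircularQueue(3)
#   q.enqueue(1).enqueue(2)        # enqueue returns self, so calls chain
#   assert q.first() == 1 and len(q) == 2
#   assert q.dequeue() == 1        # FIFO order; enqueueing past capacity raises "QUEUE IS FULL"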
| 68 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_lowercase = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: str
_lowerCamelCase: Optional[str] = None
_lowerCamelCase: Optional[Union[str, int]] = None
_lowerCamelCase: Optional[Union[str, int]] = None
_lowerCamelCase: Optional[Union[str, int]] = None
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
A , A , A = _str_to_version_tuple(self.version_str )
def __repr__( self : Optional[int] ) -> Dict:
return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return self.major, self.minor, self.patch
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple ) -> Union[str, Any]:
if isinstance(A_ ,A_ ):
return Version(A_ )
elif isinstance(A_ ,A_ ):
return other
raise TypeError(F'{other} (type {type(A_ )}) cannot be compared to version.' )
def __eq__( self : List[Any] ,A_ : Dict ) -> Any:
try:
A = self._validate_operand(A_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : List[Any] ,A_ : Optional[int] ) -> Tuple:
A = self._validate_operand(A_ )
return self.tuple < other.tuple
def __hash__( self : Union[str, Any] ) -> Union[str, Any]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,A_ : List[str] ) -> List[str]:
A = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.version_str
def _snake_case ( snake_case__ : List[str] ):
A = _VERSION_REG.match(snake_case__ )
if not res:
raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
return tuple(int(snake_case__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def _snake_case ( snake_case__ : str ):
return ".".join(str(snake_case__ ) for v in version_tuple ) | 74 | 0 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class A_ ( _lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ''''''
SCREAMING_SNAKE_CASE_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
SCREAMING_SNAKE_CASE_ = None # compression type in fsspec. ex: "gzip"
SCREAMING_SNAKE_CASE_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self :Union[str, Any] , lowerCamelCase_ :str = "" , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Optional[dict] = None , **lowerCamelCase_ :int ):
"""simple docstring"""
super().__init__(self , **A_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase__ : Dict =fsspec.open(
A_ , mode='rb' , protocol=A_ , compression=self.compression , client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase__ : Any =os.path.basename(self.file.path.split('::' )[0] )
lowerCamelCase__ : Any =(
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
lowerCamelCase__ : Dict =None
@classmethod
def UpperCAmelCase__ ( cls :List[Any] , lowerCamelCase_ :Union[str, Any] ):
"""simple docstring"""
return super()._strip_protocol(A_ ).lstrip('/' )
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
if self.dir_cache is None:
lowerCamelCase__ : Tuple ={**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
lowerCamelCase__ : Any ={f['name']: f}
def UpperCAmelCase__ ( self :Tuple , lowerCamelCase_ :str ):
"""simple docstring"""
return self.file.open().read()
def UpperCAmelCase__ ( self :Tuple , lowerCamelCase_ :str , lowerCamelCase_ :str = "rb" , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :Dict=None , **lowerCamelCase_ :int , ):
"""simple docstring"""
lowerCamelCase__ : List[str] =self._strip_protocol(A_ )
if mode != "rb":
raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'""" )
return self.file.open()
class A_ ( _lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''bz2'''
SCREAMING_SNAKE_CASE_ = '''bz2'''
SCREAMING_SNAKE_CASE_ = '''.bz2'''
class A_ ( _lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''gzip'''
SCREAMING_SNAKE_CASE_ = '''gzip'''
SCREAMING_SNAKE_CASE_ = '''.gz'''
class A_ ( _lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''lz4'''
SCREAMING_SNAKE_CASE_ = '''lz4'''
SCREAMING_SNAKE_CASE_ = '''.lz4'''
class A_ ( _lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''xz'''
SCREAMING_SNAKE_CASE_ = '''xz'''
SCREAMING_SNAKE_CASE_ = '''.xz'''
class A_ ( _lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''zstd'''
SCREAMING_SNAKE_CASE_ = '''zstd'''
SCREAMING_SNAKE_CASE_ = '''.zst'''
def __init__( self :Optional[Any] , lowerCamelCase_ :str , lowerCamelCase_ :str = "rb" , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Optional[dict] = None , lowerCamelCase_ :int = DEFAULT_BLOCK_SIZE , **lowerCamelCase_ :int , ):
"""simple docstring"""
super().__init__(
fo=A_ , mode=A_ , target_protocol=A_ , target_options=A_ , block_size=A_ , **A_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase__ : Optional[Any] =self.file.__enter__
class A_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCamelCase_ :int ):
"""simple docstring"""
lowerCamelCase__ : Any =file_
def __enter__( self :Union[str, Any] ):
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self :Dict , *lowerCamelCase_ :Dict , **lowerCamelCase_ :List[str] ):
"""simple docstring"""
self._file.__exit__(*A_ , **A_ )
def __iter__( self :Optional[int] ):
"""simple docstring"""
return iter(self._file )
def UpperCAmelCase__ ( self :Optional[int] ):
"""simple docstring"""
return next(self._file )
def __getattr__( self :Optional[Any] , lowerCamelCase_ :int ):
"""simple docstring"""
return getattr(self._file , A_ )
def fixed_enter(*lowerCamelCase_ :List[Any] , **lowerCamelCase_ :List[str] ):
return WrappedFile(_enter(*A_ , **A_ ) )
lowerCamelCase__ : Optional[int] =fixed_enter | 126 |
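# A minimal usage sketch for the compression filesystems above, reusing the chained
# URL pattern from the class comment; this assumes the filesystems have been
# registered with fsspec, as `datasets` does at import time:
#
#   import fsspec
#   with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz", "rb") as f:
#       data = f.read()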
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_lowercase = NewType('''DataClass''', Any)
_lowercase = NewType('''DataClassType''', Any)
def _snake_case ( snake_case__ : Tuple ):
if isinstance(snake_case__ , snake_case__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def _snake_case ( snake_case__ : list ):
A = {str(snake_case__ ): choice for choice in choices}
return lambda snake_case__ : str_to_choice.get(snake_case__ , snake_case__ )
def _snake_case ( *,
snake_case__ : Union[str, List[str]] = None , snake_case__ : str = None , snake_case__ : Any = dataclasses.MISSING , snake_case__ : Callable[[], Any] = dataclasses.MISSING , snake_case__ : dict = None , **snake_case__ : Any , ):
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
A = {}
if aliases is not None:
A = aliases
if help is not None:
A = help
return dataclasses.field(metadata=snake_case__ , default=snake_case__ , default_factory=snake_case__ , **snake_case__ )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Iterable[DataClassType]
def __init__( self : List[str] ,A_ : Union[DataClassType, Iterable[DataClassType]] ,**A_ : Any ) -> Optional[int]:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
A = ArgumentDefaultsHelpFormatter
super().__init__(**A_ )
if dataclasses.is_dataclass(A_ ):
A = [dataclass_types]
A = list(A_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(A_ )
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ,A_ : dataclasses.Field ) -> Optional[Any]:
A = F'--{field.name}'
A = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type ,A_ ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
A = kwargs.pop('aliases' ,[] )
if isinstance(A_ ,A_ ):
A = [aliases]
A = getattr(field.type ,'__origin__' ,field.type )
if origin_type is Union or (hasattr(A_ ,'UnionType' ) and isinstance(A_ ,types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(A_ ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F' Problem encountered in field \'{field.name}\'.' )
if type(A_ ) not in field.type.__args__:
# filter `str` in Union
A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
A = getattr(field.type ,'__origin__' ,field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
A = (
field.type.__args__[0] if isinstance(A_ ,field.type.__args__[1] ) else field.type.__args__[1]
)
A = getattr(field.type ,'__origin__' ,field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
A = {}
if origin_type is Literal or (isinstance(field.type ,A_ ) and issubclass(field.type ,A_ )):
if origin_type is Literal:
A = field.type.__args__
else:
A = [x.value for x in field.type]
A = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
A = field.default
else:
A = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
A = copy(A_ )
# Hack because type=bool in argparse does not behave as we want.
A = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
A = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
A = default
# This tells argparse we accept 0 or 1 value after --field_name
A = '?'
# This is the value that will get picked if we do --field_name (without value)
A = True
elif isclass(A_ ) and issubclass(A_ ,A_ ):
A = field.type.__args__[0]
A = '+'
if field.default_factory is not dataclasses.MISSING:
A = field.default_factory()
elif field.default is dataclasses.MISSING:
A = True
else:
A = field.type
if field.default is not dataclasses.MISSING:
A = field.default
elif field.default_factory is not dataclasses.MISSING:
A = field.default_factory()
else:
A = True
parser.add_argument(A_ ,*A_ ,**A_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
A = False
parser.add_argument(F'--no_{field.name}' ,action='store_false' ,dest=field.name ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : DataClassType ) -> List[Any]:
if hasattr(A_ ,'_argument_group_name' ):
A = self.add_argument_group(dtype._argument_group_name )
else:
A = self
try:
A = get_type_hints(A_ )
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(A_ ):
A = '.'.join(map(A_ ,sys.version_info[:3] ) )
raise RuntimeError(
F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions that lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(A_ ):
if not field.init:
continue
A = type_hints[field.name]
self._parse_dataclass_field(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Any=None ,A_ : int=False ,A_ : Any=True ,A_ : List[str]=None ,A_ : Union[str, Any]=None ,) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
A = []
if args_filename:
args_files.append(Path(A_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
A = ArgumentParser()
args_file_parser.add_argument(A_ ,type=A_ ,action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
A , A = args_file_parser.parse_known_args(args=A_ )
A = vars(A_ ).get(args_file_flag.lstrip('-' ) ,A_ )
if cmd_args_file_paths:
args_files.extend([Path(A_ ) for p in cmd_args_file_paths] )
A = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
A = file_args + args if args is not None else file_args + sys.argv[1:]
A , A = self.parse_known_args(args=A_ )
A = []
for dtype in self.dataclass_types:
A = {f.name for f in dataclasses.fields(A_ ) if f.init}
A = {k: v for k, v in vars(A_ ).items() if k in keys}
for k in keys:
delattr(A_ ,A_ )
A = dtype(**A_ )
outputs.append(A_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(A_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
return (*outputs,)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Dict[str, Any] ,A_ : bool = False ) -> Tuple[DataClass, ...]:
A = set(args.keys() )
A = []
for dtype in self.dataclass_types:
A = {f.name for f in dataclasses.fields(A_ ) if f.init}
A = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
A = dtype(**A_ )
outputs.append(A_ )
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(A_ )}' )
return tuple(A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]:
with open(Path(A_ ) ,encoding='utf-8' ) as open_json_file:
A = json.loads(open_json_file.read() )
A = self.parse_dict(A_ ,allow_extra_keys=A_ )
return tuple(A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]:
A = self.parse_dict(yaml.safe_load(Path(A_ ).read_text() ) ,allow_extra_keys=A_ )
return tuple(A_ ) | 74 | 0 |
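# A minimal usage sketch for the argument parser above with a hypothetical dataclass,
# assuming the original method name parse_args_into_dataclasses:
#
#   @dataclasses.dataclass
#   class TrainingOpts:
#       learning_rate: float = dataclasses.field(default=1e-4, metadata={"help": "peak LR"})
#       fp16: bool = False
#
#   parser = HfArgumentParser(TrainingOpts)
#   (opts,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-5", "--fp16"])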
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def UpperCAmelCase__ ( _A : int ):
'''simple docstring'''
random.seed(snake_case__ )
np.random.seed(snake_case__ )
torch.manual_seed(snake_case__ )
torch.cuda.manual_seed_all(snake_case__ )
# ^^ safe to call this function even if cuda is not available
class __magic_name__ :
'''simple docstring'''
def __init__( self, lowercase_, lowercase_ = 0.9999, lowercase_ = 0.0, lowercase_ = 0, lowercase_ = False, lowercase_ = 1.0, lowercase_ = 2 / 3, lowercase_ = None, lowercase_ = None, **lowercase_, ) -> Optional[Any]:
"""simple docstring"""
if isinstance(A_, torch.nn.Module ):
a__ =(
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''', '''1.0.0''', A_, standard_warn=A_, )
a__ =parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
a__ =True
if kwargs.get('''max_value''', A_ ) is not None:
a__ ='''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''', '''1.0.0''', A_, standard_warn=A_ )
a__ =kwargs['''max_value''']
if kwargs.get('''min_value''', A_ ) is not None:
a__ ='''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''', '''1.0.0''', A_, standard_warn=A_ )
a__ =kwargs['''min_value''']
a__ =list(A_ )
a__ =[p.clone().detach() for p in parameters]
if kwargs.get('''device''', A_ ) is not None:
a__ ='''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''', '''1.0.0''', A_, standard_warn=A_ )
self.to(device=kwargs['''device'''] )
a__ =None
a__ =decay
a__ =min_decay
a__ =update_after_step
a__ =use_ema_warmup
a__ =inv_gamma
a__ =power
a__ =0
a__ =None # set in `step()`
a__ =model_cls
a__ =model_config
@classmethod
def _UpperCAmelCase ( cls, lowercase_, lowercase_ ) -> "EMAModel":
"""simple docstring"""
a__, a__ =model_cls.load_config(A_, return_unused_kwargs=A_ )
a__ =model_cls.from_pretrained(A_ )
a__ =cls(model.parameters(), model_cls=A_, model_config=model.config )
ema_model.load_state_dict(A_ )
return ema_model
def _UpperCAmelCase ( self, lowercase_ ) -> List[str]:
"""simple docstring"""
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
a__ =self.model_cls.from_config(self.model_config )
a__ =self.state_dict()
state_dict.pop('''shadow_params''', A_ )
model.register_to_config(**A_ )
self.copy_to(model.parameters() )
model.save_pretrained(A_ )
def _UpperCAmelCase ( self, lowercase_ ) -> float:
"""simple docstring"""
a__ =max(0, optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
a__ =1 - (1 + step / self.inv_gamma) ** -self.power
else:
a__ =(1 + step) / (10 + step)
a__ =min(A_, self.decay )
# make sure decay is not smaller than min_decay
a__ =max(A_, self.min_decay )
return cur_decay_value
@torch.no_grad()
def _UpperCAmelCase ( self, lowercase_ ) -> Any:
"""simple docstring"""
if isinstance(A_, torch.nn.Module ):
a__ =(
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''', '''1.0.0''', A_, standard_warn=A_, )
a__ =parameters.parameters()
a__ =list(A_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
a__ =self.get_decay(self.optimization_step )
a__ =decay
a__ =1 - decay
a__ =contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params, A_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
a__ =deepspeed.zero.GatheredParameters(A_, modifier_rank=A_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(A_ )
def _UpperCAmelCase ( self, lowercase_ ) -> None:
"""simple docstring"""
a__ =list(A_ )
for s_param, param in zip(self.shadow_params, A_ ):
param.data.copy_(s_param.to(param.device ).data )
def _UpperCAmelCase ( self, lowercase_=None, lowercase_=None ) -> None:
"""simple docstring"""
a__ =[
p.to(device=A_, dtype=A_ ) if p.is_floating_point() else p.to(device=A_ )
for p in self.shadow_params
]
def _UpperCAmelCase ( self ) -> dict:
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def _UpperCAmelCase ( self, lowercase_ ) -> None:
"""simple docstring"""
a__ =[param.detach().cpu().clone() for param in parameters]
def _UpperCAmelCase ( self, lowercase_ ) -> None:
"""simple docstring"""
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params, A_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
a__ =None
def _UpperCAmelCase ( self, lowercase_ ) -> None:
"""simple docstring"""
a__ =copy.deepcopy(A_ )
a__ =state_dict.get('''decay''', self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
a__ =state_dict.get('''min_decay''', self.min_decay )
if not isinstance(self.min_decay, A_ ):
raise ValueError('''Invalid min_decay''' )
a__ =state_dict.get('''optimization_step''', self.optimization_step )
if not isinstance(self.optimization_step, A_ ):
raise ValueError('''Invalid optimization_step''' )
a__ =state_dict.get('''update_after_step''', self.update_after_step )
if not isinstance(self.update_after_step, A_ ):
raise ValueError('''Invalid update_after_step''' )
a__ =state_dict.get('''use_ema_warmup''', self.use_ema_warmup )
if not isinstance(self.use_ema_warmup, A_ ):
raise ValueError('''Invalid use_ema_warmup''' )
a__ =state_dict.get('''inv_gamma''', self.inv_gamma )
if not isinstance(self.inv_gamma, (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
a__ =state_dict.get('''power''', self.power )
if not isinstance(self.power, (float, int) ):
raise ValueError('''Invalid power''' )
a__ =state_dict.get('''shadow_params''', A_ )
if shadow_params is not None:
a__ =shadow_params
if not isinstance(self.shadow_params, A_ ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(A_, torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
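# A schematic training-loop sketch for the EMA helper above (diffusers' EMAModel),
# assuming the original method names step/copy_to; `model`, `optimizer` and
# `dataloader` are placeholders:
#
#   ema = EMAModel(model.parameters(), decay=0.9999)
#   for batch in dataloader:
#       loss = model(batch).mean()
#       loss.backward(); optimizer.step(); optimizer.zero_grad()
#       ema.step(model.parameters())       # update the shadow (EMA) weights
#   ema.copy_to(model.parameters())        # bake EMA weights into the model for eval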
| 188 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase = 16
_lowercase = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('glue', 'mrpc')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
        performance_metric[f'epoch-{epoch}'] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--performance_lower_bound', type=float, default=None, help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.', )
    parser.add_argument(
        '--num_epochs', type=int, default=3, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
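# How a script like this is typically launched (hedged note added for context; the
# config file name and flags below are illustrative, not taken from the original):
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3
#
# When the DeepSpeed config supplies its own optimizer/scheduler, the DummyOptim and
# DummyScheduler branches above are taken instead of AdamW and the linear warmup schedule.
| 74 | 0 |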
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given any two of resistance, reactance and impedance (pass the unknown one as 0),
    compute the missing quantity of a series AC circuit from Z^2 = R^2 + X^2."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
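    # Quick sanity check (added example): with R=3 and X=4, the magnitude is the
    # classic 3-4-5 triple, since Z = sqrt(R^2 + X^2).
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}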
| 267 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : str ,A_ : Dict=13 ,A_ : str=7 ,A_ : str=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=False ,A_ : str=False ,A_ : Tuple=False ,A_ : str=2 ,A_ : Optional[int]=99 ,A_ : Union[str, Any]=0 ,A_ : Optional[Any]=32 ,A_ : Optional[int]=5 ,A_ : Optional[int]=4 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=0.1 ,A_ : Union[str, Any]=512 ,A_ : Union[str, Any]=2 ,A_ : Any=0.02 ,A_ : List[str]=2 ,A_ : int=4 ,A_ : int="last" ,A_ : Dict=True ,A_ : Union[str, Any]=None ,A_ : Any=0 ,) -> List[Any]:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_lengths
A = use_token_type_ids
A = use_labels
A = gelu_activation
A = sinusoidal_embeddings
A = causal
A = asm
A = n_langs
A = vocab_size
A = n_special
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = summary_type
A = use_proj
A = scope
A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_input_lengths:
A = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,2 ).float()
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ,A_ : int ,A_ : Dict ,A_ : str ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Union[str, Any] ,A_ : int ,A_ : str ,) -> Any:
A = XLMModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,lengths=A_ ,langs=A_ )
A = model(A_ ,langs=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Any ,A_ : str ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : str ,A_ : Any ,A_ : str ,A_ : Dict ,) -> Dict:
A = XLMWithLMHeadModel(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[str] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Any ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[Any] ,) -> int:
A = XLMForQuestionAnsweringSimple(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
A = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Optional[int] ,A_ : Any ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Tuple ,A_ : List[str] ,A_ : Optional[int] ,) -> List[Any]:
A = XLMForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,)
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,)
((A) , ) = result_with_labels.to_tuple()
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
((A) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : int ,A_ : Optional[int] ,A_ : List[str] ,A_ : str ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ,) -> Optional[int]:
A = XLMForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ,A_ : str ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : Optional[int] ,) -> List[str]:
A = self.num_labels
A = XLMForTokenClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : Union[str, Any] ,A_ : Dict ,A_ : List[Any] ,) -> List[str]:
A = self.num_choices
A = XLMForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = model(
A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ) -> Any:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Optional[int] ,A_ : List[Any]=False ) -> int:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = XLMModelTester(self )
A = ConfigTester(self ,config_class=A_ ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : Any ,A_ : str ,A_ : Tuple ,A_ : Any ,A_ : Any=False ,A_ : Any=1 ) -> List[Any]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) )
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = min_length + idx + 1
A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : str ,A_ : Optional[int] ,A_ : int ,A_ : Any ,A_ : str=False ,A_ : Any=1 ) -> Tuple:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,)
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,)
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = XLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(A_ )
A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president
A = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A = model.generate(A_ ,do_sample=A_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ ) | 74 | 0 |
import os
def solution() -> int:
    """Find the maximum total from top to bottom of the triangle in triangle.txt by
    accumulating, row by row, the best path ending at each cell."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, 'triangle.txt')
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' '):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
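    # Minimal illustration of the same bottom-up recurrence (added example, not part
    # of the original script): each cell absorbs the larger of its two parents.
    small = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(1, len(small)):
        for j in range(len(small[i])):
            left = small[i - 1][j - 1] if j > 0 else 0
            right = small[i - 1][j] if j != len(small[i - 1]) else 0
            small[i][j] += max(left, right)
    print(max(small[-1]))  # 23, the well-known answer for this 4-row triangle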
| 334 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__(self, **kwargs):
        """Translate deprecated `no_*` flags into their positive counterparts before
        delegating to the parent class."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'
                    f' {positive_arg}={kwargs[positive_arg]}')
        self.tpu_name = kwargs.pop('tpu_name', self.tpu_name)
        self.device_idx = kwargs.pop('device_idx', self.device_idx)
        self.eager_mode = kwargs.pop('eager_mode', self.eager_mode)
        self.use_xla = kwargs.pop('use_xla', self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None, metadata={'help': 'Name of TPU'}, )
    device_idx: int = field(
        default=0, metadata={'help': 'CPU / GPU device index. Defaults to 0.'}, )
    eager_mode: bool = field(default=False, metadata={'help': 'Benchmark models in eager model.'})
    use_xla: bool = field(
        default=False, metadata={
            'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
        }, )
    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ['tf'])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ['tf'])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], 'GPU')
                strategy = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}')
            else:
                tf.config.set_visible_devices([], 'GPU')  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}')
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ['tf'])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ['tf'])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ['tf'])
        return tf.config.list_physical_devices('GPU')

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ['tf'])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 74 | 0 |
"""simple docstring"""
import unittest
from transformers import DonutProcessor
__lowerCamelCase = "naver-clova-ix/donut-base"
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> Any:
A__ = DonutProcessor.from_pretrained(A_ )
def snake_case__ ( self ) -> Optional[int]:
A__ = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
A__ = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
A__ = self.processor.tokenajson(A_ )
self.assertDictEqual(A_ ,A_ )
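    def test_token2json_single_field(self) -> None:
        # Hedged extra check (added; not in the original test): a minimal one-field
        # sequence should also round-trip through token2json.
        sequence = "<s_name>Jane</s_name>"
        self.assertDictEqual(self.processor.token2json(sequence), {"name": "Jane"})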
| 221 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info('Initializing the config with a `BiT` backbone.')
                backbone_config = {
                    'global_padding': 'same',
                    'layer_type': 'bottleneck',
                    'depths': [3, 4, 9],
                    'out_features': ['stage1', 'stage2', 'stage3'],
                    'embedding_dynamic_padding': True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info('Initializing the config with a `BiT` backbone.')
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.')
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.')
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']')
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 74 | 0 |
"""simple docstring"""
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(x) with its Maclaurin series, x - x^3/3! + x^5/5! - ..."""
    # Simplify the angle to be between -360 and 360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 102 |
"""simple docstring"""
from __future__ import annotations
import math
__version__ = '2020.9.26'
__author__ = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Project a 3D point onto a 2D plane using a simple perspective divide."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f'Input values must either be float or int: {list(locals().values())}'
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point around the given axis by `angle` (in the 0-360 input scale)."""
    if not isinstance(axis, str):
        raise TypeError('Axis must be a str')
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            'Input values except axis must either be float or int: '
            f'{list(input_variables.values())}'
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'')
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 74 | 0 |
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a `size` x `size` window over `arr` with the given stride, keeping the max."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a `size` x `size` window over `arr` with the given stride, keeping the average."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
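    # Small self-contained check (added example, independent of 'path_to_image'):
    # a 4x4 ramp pooled with size=2, stride=2 keeps the max/average of each block.
    demo = np.arange(16).reshape(4, 4)
    print(maxpooling(demo, size=2, stride=2))  # [[ 5.  7.] [13. 15.]]
    print(avgpooling(demo, size=2, stride=2))  # [[ 2.  4.] [10. 12.]]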
| 43 |
"""simple docstring"""
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a plain list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
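# Brief usage sketch (added example) showing how the ring buffer reuses freed slots:
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(10).enqueue(20).enqueue(30)  # enqueue returns self, so calls chain
    print(len(queue), queue.first())  # 3 10
    print(queue.dequeue(), queue.dequeue())  # 10 20
    queue.enqueue(40)  # wraps around into the slot freed at index 0
    print(queue.dequeue(), queue.dequeue())  # 30 40
| 74 | 0 |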
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " "):
    """Split the text every `n`-th occurrence of `character`"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict):
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents['''title'''], documents['''text''']):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else '''''')
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast):
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents['''title'''], documents['''text'''], truncation=True, padding='''longest''', return_tensors='''pt''')['''input_ids''']
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def _a ( SCREAMING_SNAKE_CASE : "RagExampleArguments" , SCREAMING_SNAKE_CASE : "ProcessingArguments" , SCREAMING_SNAKE_CASE : "IndexHnswArguments" , ):
"""simple docstring"""
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
UpperCamelCase__ : Tuple = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
UpperCamelCase__ : List[Any] = dataset.map(snake_case__ , batched=snake_case__ , num_proc=processing_args.num_proc )
# And compute the embeddings
UpperCamelCase__ : Union[str, Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=snake_case__ )
UpperCamelCase__ : int = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
UpperCamelCase__ : Tuple = Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
UpperCamelCase__ : List[str] = dataset.map(
partial(snake_case__ , ctx_encoder=snake_case__ , ctx_tokenizer=snake_case__ ) , batched=snake_case__ , batch_size=processing_args.batch_size , features=snake_case__ , )
# And finally save your dataset
UpperCamelCase__ : Dict = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
dataset.save_to_disk(snake_case__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
UpperCamelCase__ : Dict = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''' , custom_index=snake_case__ )
# And save the index
UpperCamelCase__ : Optional[Any] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(snake_case__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"), metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"}, )
    question: Optional[str] = field(
        default=None, metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."}, )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq", metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"}, )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base", metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        }, )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"), metadata={"help": "Path to a directory where the dataset passages and the index will be saved"}, )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None, metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        }, )
    batch_size: int = field(
        default=16, metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        }, )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768, metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."}, )
    m: int = field(
        default=128, metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        }, )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
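    # Hedged usage note (added; the CSV name is illustrative): run for example
    #   python use_own_knowledge_dataset.py --csv_path my_docs.csv --output_dir ./kb
    # where my_docs.csv is tab-separated with two columns per line: title<TAB>text.
    # The script then writes the embedded passages plus a Faiss HNSW index to ./kb.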
| 146 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowercase = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 74 | 0 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 2048-bit
1_4: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 3072-bit
1_5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 4096-bit
1_6: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 6144-bit
1_7: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 8192-bit
1_8: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol over the
    RFC 3526 MODP groups defined above."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("""Unsupported Group""")
        self.prime = primes[group]["""prime"""]
        self.generator = primes[group]["""generator"""]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("""Invalid public key""")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["""prime"""]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("""Invalid public key""")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
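    # Hedged round-trip sketch (added example): two parties derive the same secret.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_public = alice.generate_public_key()
    bob_public = bob.generate_public_key()
    # Each side combines its own private key with the other's public key.
    assert alice.generate_shared_key(bob_public) == bob.generate_shared_key(alice_public)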
| 346 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = '''gpt_bigcode'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(self, vocab_size=5_0257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=5_0256, eos_token_id=5_0256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
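# Hedged usage sketch (added example): instantiating the config and reading a mapped
# attribute; `attribute_map` lets the generic `hidden_size` name resolve to `n_embd`.
if __name__ == "__main__":
    config = GPTBigCodeConfig(n_embd=2048, n_layer=24, multi_query=True)
    print(config.hidden_size)  # 2048, via the attribute_map alias
| 74 | 0 |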
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 266 |
"""simple docstring"""
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = '''sshleifer/student_marian_en_ro_6_1'''
MBART_TINY = '''sshleifer/tiny-mbart'''
@require_torch
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any]=False ,A_ : Optional[int]=None ,A_ : List[str]=True ,A_ : Tuple=True ,A_ : Union[str, Any]=True ,A_ : List[str]=True ,) -> Tuple:
A = self.run_trainer(
eval_steps=1 ,max_len=12 ,model_name=A_ ,num_train_epochs=1 ,distributed=A_ ,extra_args_str=A_ ,predict_with_generate=A_ ,do_train=A_ ,do_eval=A_ ,do_predict=A_ ,)
A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history
if not do_eval:
return
A = [log for log in logs if 'eval_loss' in log.keys()]
A = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
A = eval_metrics[-1]
assert isinstance(last_step_stats['eval_bleu'] ,A_ )
assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
self.run_seqaseq_quick(distributed=A_ )
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
self.run_seqaseq_quick(distributed=A_ )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple --fp16' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2' ,predict_with_generate=A_ )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
self.run_seqaseq_quick(
distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2 --fp16' ,predict_with_generate=A_ )
@require_apex
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
        # XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() called from the same
        # program, and it breaks other tests that run from the same pytest worker. Therefore, until this is
        # sorted out, it must be run only in an external program - that is, distributed=True in this
        # test and only under one or more gpus; if we want cpu we will need to make a special test
        #
        # specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via
        # a 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' )
        # test a 2nd time - was getting 'eval_loss': nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' )
@parameterized.expand(['base', 'low', 'high', 'mixed'] )
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Dict ) -> List[str]:
        # as each sub-test is slow-ish, split into multiple sub-tests to avoid CI timeout
A = {
# test with the default log_level - should be info and thus log info once
'base': {'extra_args_str': '', 'n_matches': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
}
A = experiments[experiment_id]
A = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
A = 'Running training'
with CaptureStderr() as cl:
self.run_seqaseq_quick(**A_ ,extra_args_str=data['extra_args_str'] )
A = len(re.findall(A_ ,cl.err ) )
self.assertEqual(A_ ,data['n_matches'] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
A = self.run_trainer(
eval_steps=2 ,max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=10 ,distributed=A_ ,)
# Check metrics
A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history
A = [log for log in logs if 'eval_loss' in log.keys()]
A = eval_metrics[0]
A = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['eval_bleu'] ,A_ )
# test if do_predict saves generations and metrics
A = os.listdir(A_ )
A = {os.path.basename(A_ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(A_ : str ) -> Tuple[int, float]:
A = '--skip_memory_metrics 0'
A = self.run_trainer(
max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=1 ,optim=A_ ,distributed=A_ ,extra_args_str=A_ ,do_eval=A_ ,do_predict=A_ ,n_gpus_to_use=1 ,)
# Check metrics
A = TrainerState.load_from_json(Path(A_ ,'trainer_state.json' ) ).log_history
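            # the *_delta memory values below are reported in bytes; dividing by 2**20 converts them to MiB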
A = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 )
A = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 )
A = logs[0]['train_loss']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
A , A , A = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
A , A , A = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
A = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
A = gpu_peak_mem_orig + gpu_alloc_mem_orig
A = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
A = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding` weights that
        # don't get quantized and remain in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between gpus, let's check
# that we have at least 120MB in savings
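        # Worked check of the arithmetic above: 25M quantized params * (8 - 2) bytes/param ~= 150MB,
        # so the 120MB threshold below leaves ~30MB of headroom for per-GPU variation.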
A = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
A_ ,A_ ,'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' ,)
self.assertGreater(
A_ ,A_ ,'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' ,)
self.assertEqual(
A_ ,A_ ,F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ,A_ : str ,A_ : int ,A_ : float = 3e-3 ,A_ : str = "adafactor" ,A_ : bool = False ,A_ : str = None ,A_ : int = 0 ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : int = None ,) -> Dict:
A = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
A = self.get_auto_remove_tmp_dir()
        A = F'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split()
        A = F'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A_ )}\n '.split()
A = '\n --do_predict\n '.split()
A = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'--optim {optim}'.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
A = get_gpu_count()
A = get_torch_dist_unique_port()
A = F'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split()
A = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A_ ,env=self.get_env() )
else:
A = ['run_translation.py'] + args
with patch.object(A_ ,'argv' ,A_ ):
main()
return output_dir | 74 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCAmelCase__ = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
lowerCAmelCase__ = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
lowerCAmelCase__ = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
 predictions: list of generated text to score. Each prediction
 should be a string with tokens separated by spaces.
 references: list of references, one per prediction. Each
 reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
 pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
 featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\'. Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def UpperCamelCase ( self , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase="auto" , lowercase=-1 , lowercase=0.9 , lowercase=5 , lowercase=500 , lowercase="gpt2-large" , lowercase=-1 , lowercase=1024 , lowercase=25 , lowercase=5 , lowercase=True , lowercase=25 , ) -> str:
'''simple docstring'''
A__ = compute_mauve(
p_text=A_ , q_text=A_ , p_features=A_ , q_features=A_ , p_tokens=A_ , q_tokens=A_ , num_buckets=A_ , pca_max_data=A_ , kmeans_explained_var=A_ , kmeans_num_redo=A_ , kmeans_max_iter=A_ , featurize_model_name=A_ , device_id=A_ , max_text_length=A_ , divergence_curve_discretization_size=A_ , mauve_scaling_factor=A_ , verbose=A_ , seed=A_ , )
return out
| 68 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''deit'''
def __init__( self : int ,A_ : Optional[Any]=768 ,A_ : Union[str, Any]=12 ,A_ : Dict=12 ,A_ : int=3072 ,A_ : Optional[Any]="gelu" ,A_ : Dict=0.0 ,A_ : Any=0.0 ,A_ : str=0.02 ,A_ : Tuple=1e-12 ,A_ : Union[str, Any]=224 ,A_ : Optional[Any]=16 ,A_ : List[Any]=3 ,A_ : Optional[Any]=True ,A_ : Optional[int]=16 ,**A_ : Union[str, Any] ,) -> Dict:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = encoder_stride
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: int = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> float:
return 1e-4 | 74 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowerCAmelCase = logging.get_logger(__name__)
@dataclass
class A_ ( _lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self :int , **lowerCamelCase_ :Any ):
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowerCamelCase__ : List[str] =deprecated_arg[3:]
lowerCamelCase__ : Any =not kwargs.pop(A_ )
                logger.warning(
                    f"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
lowerCamelCase__ : Any =kwargs.pop('tpu_name' , self.tpu_name )
lowerCamelCase__ : Optional[int] =kwargs.pop('device_idx' , self.device_idx )
lowerCamelCase__ : int =kwargs.pop('eager_mode' , self.eager_mode )
lowerCamelCase__ : Union[str, Any] =kwargs.pop('use_xla' , self.use_xla )
super().__init__(**A_ )
SCREAMING_SNAKE_CASE_ = field(
default=_lowercase , metadata={"""help""": """Name of TPU"""} , )
SCREAMING_SNAKE_CASE_ = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
    SCREAMING_SNAKE_CASE_ = field(default=_lowercase , metadata={"""help""": """Benchmark models in eager mode."""} )
    SCREAMING_SNAKE_CASE_ = field(
        default=_lowercase , metadata={
            """help""": """Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."""
        } , )
@cached_property
def UpperCAmelCase__ ( self :List[Any] ):
"""simple docstring"""
requires_backends(self , ['tf'] )
lowerCamelCase__ : Union[str, Any] =None
if self.tpu:
try:
if self.tpu_name:
lowerCamelCase__ : List[Any] =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
lowerCamelCase__ : List[Any] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
lowerCamelCase__ : Any =None
return tpu
@cached_property
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
requires_backends(self , ['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
lowerCamelCase__ : List[str] =tf.distribute.TPUStrategy(self._setup_tpu )
else:
            # currently no multi-GPU is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
lowerCamelCase__ : int =tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , 'GPU' ) # disable GPU
lowerCamelCase__ : Tuple =tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
return strategy
@property
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
requires_backends(self , ['tf'] )
return self._setup_tpu is not None
@property
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['tf'] )
return self._setup_strategy
@property
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
requires_backends(self , ['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
requires_backends(self , ['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
return self.n_gpu > 0 | 126 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[int]=0.999 , snake_case__ : Union[str, Any]="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ : Dict ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
A = []
for i in range(snake_case__ ):
A = i / num_diffusion_timesteps
A = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) )
return torch.tensor(snake_case__ , dtype=torch.floataa )
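# Usage sketch for the helper above (illustrative): calling it with 1000 diffusion timesteps
# yields a length-1000 tensor of betas derived from the chosen alpha-bar schedule, with each
# beta capped at the 0.999 default.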
class lowerCAmelCase_ ( _lowercase , _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
_lowerCamelCase: Optional[Any] = 2
@register_to_config
def __init__( self : str ,A_ : int = 1000 ,A_ : float = 0.0_00_85 ,A_ : float = 0.0_12 ,A_ : str = "linear" ,A_ : Optional[Union[np.ndarray, List[float]]] = None ,A_ : str = "epsilon" ,A_ : Optional[bool] = False ,A_ : Optional[bool] = False ,A_ : float = 1.0 ,A_ : str = "linspace" ,A_ : int = 0 ,) -> List[str]:
if trained_betas is not None:
A = torch.tensor(A_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
A = torch.linspace(A_ ,A_ ,A_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,A_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A = betas_for_alpha_bar(A_ ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
A = betas_for_alpha_bar(A_ ,alpha_transform_type='exp' )
else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}' )
A = 1.0 - self.betas
A = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(A_ ,A_ ,A_ )
A = use_karras_sigmas
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : Tuple=None ) -> Tuple:
if schedule_timesteps is None:
A = self.timesteps
A = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A = 1 if len(A_ ) > 1 else 0
else:
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
A = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : torch.FloatTensor ,A_ : Union[float, torch.FloatTensor] ,) -> torch.FloatTensor:
A = self.index_for_timestep(A_ )
A = self.sigmas[step_index]
A = sample / ((sigma**2 + 1) ** 0.5)
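        # the 1 / sqrt(sigma**2 + 1) factor is the standard k-diffusion input preconditioning (c_in)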
return sample
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : Union[str, torch.device] = None ,A_ : Optional[int] = None ,) -> Optional[Any]:
A = num_inference_steps
A = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A = np.linspace(0 ,num_train_timesteps - 1 ,A_ ,dtype=A_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(0 ,A_ ) * step_ratio).round()[::-1].copy().astype(A_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(A_ ,0 ,-step_ratio )).round().copy().astype(A_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
A = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A = np.log(A_ )
A = np.interp(A_ ,np.arange(0 ,len(A_ ) ) ,A_ )
if self.config.use_karras_sigmas:
A = self._convert_to_karras(in_sigmas=A_ ,num_inference_steps=self.num_inference_steps )
A = np.array([self._sigma_to_t(A_ ,A_ ) for sigma in sigmas] )
A = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
A = torch.from_numpy(A_ ).to(device=A_ )
A = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A = torch.from_numpy(A_ )
A = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(A_ ).startswith('mps' ):
# mps does not support float64
A = timesteps.to(A_ ,dtype=torch.floataa )
else:
A = timesteps.to(device=A_ )
# empty dt and derivative
A = None
A = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A = defaultdict(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : List[str] ) -> Dict:
# get log sigma
A = np.log(A_ )
# get distribution
A = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A = low_idx + 1
A = log_sigmas[low_idx]
A = log_sigmas[high_idx]
# interpolate sigmas
A = (low - log_sigma) / (low - high)
A = np.clip(A_ ,0 ,1 )
# transform interpolation to time range
A = (1 - w) * low_idx + w * high_idx
A = t.reshape(sigma.shape )
return t
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : torch.FloatTensor ,A_ : int ) -> torch.FloatTensor:
A = in_sigmas[-1].item()
A = in_sigmas[0].item()
A = 7.0 # 7.0 is the value used in the paper
A = np.linspace(0 ,1 ,A_ )
A = sigma_min ** (1 / rho)
A = sigma_max ** (1 / rho)
A = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
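    # The ramp above reproduces the rho-warped noise schedule of Karras et al. (2022):
    # it interpolates between sigma_max**(1/rho) and sigma_min**(1/rho), then raises the
    # result back to the power rho, giving denser sampling near sigma_min.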
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return self.dt is None
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : Union[float, torch.FloatTensor] ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : bool = True ,) -> Union[SchedulerOutput, Tuple]:
A = self.index_for_timestep(A_ )
# advance index counter by 1
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A = self.sigmas[step_index]
A = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A = self.sigmas[step_index - 1]
A = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A = 0
A = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A = sigma_hat if self.state_in_first_order else sigma_next
A = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A = sigma_hat if self.state_in_first_order else sigma_next
A = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A = model_output
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
A = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A = sigma_next - sigma_hat
# store for 2nd order step
A = derivative
A = dt
A = sample
else:
# 2. 2nd order / Heun's method
A = (sample - pred_original_sample) / sigma_next
A = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A = self.dt
A = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A = None
A = None
A = None
A = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(A_ ):
# mps does not support float64
A = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
A = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
A = self.timesteps.to(original_samples.device )
A = timesteps.to(original_samples.device )
A = [self.index_for_timestep(A_ ,A_ ) for t in timesteps]
A = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A = sigma.unsqueeze(-1 )
A = original_samples + noise * sigma
return noisy_samples
def __len__( self : Dict ) -> int:
return self.config.num_train_timesteps | 74 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = '''▁'''
lowerCamelCase = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
lowerCamelCase = {
'''google/reformer-crime-and-punishment''': 52_4288,
}
class __magic_name__ ( _lowercase ):
'''simple docstring'''
lowerCamelCase__ : Any = VOCAB_FILES_NAMES
lowerCamelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Any = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_, lowercase_="</s>", lowercase_="<unk>", lowercase_=[], lowercase_ = None, **lowercase_, ) -> None:
"""simple docstring"""
a__ ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A_, unk_token=A_, additional_special_tokens=A_, sp_model_kwargs=self.sp_model_kwargs, **A_, )
a__ =vocab_file
a__ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def _UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
return self.sp_model.get_piece_size()
def _UpperCAmelCase ( self ) -> Dict[str, int]:
"""simple docstring"""
a__ ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
"""simple docstring"""
a__ =self.__dict__.copy()
a__ =None
return state
def __setstate__( self, lowercase_ ) -> str:
"""simple docstring"""
a__ =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
a__ ={}
a__ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCAmelCase ( self, lowercase_ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(A_, out_type=A_ )
def _UpperCAmelCase ( self, lowercase_ ) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.piece_to_id(A_ )
def _UpperCAmelCase ( self, lowercase_ ) -> Dict:
"""simple docstring"""
if index < self.sp_model.get_piece_size():
a__ =self.sp_model.IdToPiece(A_ )
return token
def _UpperCAmelCase ( self, lowercase_ ) -> Dict:
"""simple docstring"""
a__ =[]
a__ =''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
a__ =[]
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _UpperCAmelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a__ =os.path.join(
A_, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_, '''wb''' ) as fi:
a__ =self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
| 188 |
"""simple docstring"""
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ,A_ : list[int] ) -> None:
A = len(A_ )
A = [0] * len_array
if len_array > 0:
A = array[0]
for i in range(1 ,A_ ):
A = self.prefix_sum[i - 1] + array[i]
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ) -> bool:
A = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(A_ )
return False
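# Illustrative behaviour of the prefix-sum class above (hypothetical values): constructed with
# [1, 2, 3] the prefix sums are [1, 3, 6]; the range query (0, 2) returns 6, and a target sum
# of 5 is reported as present because the subarray 2 + 3 == 5.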
if __name__ == "__main__":
import doctest
doctest.testmod() | 74 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : int = logging.get_logger(__name__)
def a__ ( a__ ):
"""simple docstring"""
if "resnet-50" in model_name:
__SCREAMING_SNAKE_CASE = ResNetConfig.from_pretrained("""microsoft/resnet-50""" )
elif "resnet-101" in model_name:
__SCREAMING_SNAKE_CASE = ResNetConfig.from_pretrained("""microsoft/resnet-101""" )
else:
raise ValueError("""Model name should include either resnet50 or resnet101""" )
__SCREAMING_SNAKE_CASE = DetrConfig(use_timm_backbone=snake_case__ , backbone_config=snake_case__ )
# set label attributes
__SCREAMING_SNAKE_CASE = """panoptic""" in model_name
if is_panoptic:
__SCREAMING_SNAKE_CASE = 2_50
else:
__SCREAMING_SNAKE_CASE = 91
__SCREAMING_SNAKE_CASE = """huggingface/label-files"""
__SCREAMING_SNAKE_CASE = """coco-detection-id2label.json"""
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
__SCREAMING_SNAKE_CASE = {int(snake_case__ ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
F'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
F'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = state_dict.pop(snake_case__ )
__SCREAMING_SNAKE_CASE = val
def a__ ( a__ , a__=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """"""
if is_panoptic:
__SCREAMING_SNAKE_CASE = """detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__SCREAMING_SNAKE_CASE = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
__SCREAMING_SNAKE_CASE = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__SCREAMING_SNAKE_CASE = in_proj_weight[:2_56, :]
__SCREAMING_SNAKE_CASE = in_proj_bias[:2_56]
__SCREAMING_SNAKE_CASE = in_proj_weight[2_56:5_12, :]
__SCREAMING_SNAKE_CASE = in_proj_bias[2_56:5_12]
__SCREAMING_SNAKE_CASE = in_proj_weight[-2_56:, :]
__SCREAMING_SNAKE_CASE = in_proj_bias[-2_56:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__SCREAMING_SNAKE_CASE = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
__SCREAMING_SNAKE_CASE = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__SCREAMING_SNAKE_CASE = in_proj_weight[:2_56, :]
__SCREAMING_SNAKE_CASE = in_proj_bias[:2_56]
__SCREAMING_SNAKE_CASE = in_proj_weight[2_56:5_12, :]
__SCREAMING_SNAKE_CASE = in_proj_bias[2_56:5_12]
__SCREAMING_SNAKE_CASE = in_proj_weight[-2_56:, :]
__SCREAMING_SNAKE_CASE = in_proj_bias[-2_56:]
# read in weights + bias of input projection layer of cross-attention
__SCREAMING_SNAKE_CASE = state_dict.pop(
F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
__SCREAMING_SNAKE_CASE = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[:2_56, :]
__SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[:2_56]
__SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[2_56:5_12, :]
__SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[2_56:5_12]
__SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[-2_56:, :]
__SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[-2_56:]
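# Note on the 2_56 slicing above: DETR's transformer hidden size is 256, so each fused
# in_proj matrix of shape (3*256, 256) splits into query/key/value blocks of 256 rows each.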
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__SCREAMING_SNAKE_CASE = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def a__ ( a__ , a__=None , a__=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_detr_config(snake_case__ )
# load original model from torch hub
__SCREAMING_SNAKE_CASE = {
"""detr-resnet-50""": """detr_resnet50""",
"""detr-resnet-101""": """detr_resnet101""",
}
logger.info(F'Converting model {model_name}...' )
__SCREAMING_SNAKE_CASE = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=snake_case__ ).eval()
__SCREAMING_SNAKE_CASE = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(snake_case__ ):
if is_panoptic:
__SCREAMING_SNAKE_CASE = """detr.""" + src
rename_key(snake_case__ , snake_case__ , snake_case__ )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case__ , is_panoptic=snake_case__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__SCREAMING_SNAKE_CASE = """detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
__SCREAMING_SNAKE_CASE = state_dict.pop(snake_case__ )
__SCREAMING_SNAKE_CASE = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__SCREAMING_SNAKE_CASE = state_dict.pop(snake_case__ )
__SCREAMING_SNAKE_CASE = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
__SCREAMING_SNAKE_CASE = state_dict.pop(snake_case__ )
__SCREAMING_SNAKE_CASE = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
__SCREAMING_SNAKE_CASE = state_dict.pop(snake_case__ )
__SCREAMING_SNAKE_CASE = val
# finally, create HuggingFace model and load state dict
__SCREAMING_SNAKE_CASE = DetrForSegmentation(snake_case__ ) if is_panoptic else DetrForObjectDetection(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# verify our conversion on an image
__SCREAMING_SNAKE_CASE = """coco_panoptic""" if is_panoptic else """coco_detection"""
__SCREAMING_SNAKE_CASE = DetrImageProcessor(format=snake_case__ )
__SCREAMING_SNAKE_CASE = processor(images=prepare_img() , return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE = encoding["""pixel_values"""]
__SCREAMING_SNAKE_CASE = detr(snake_case__ )
__SCREAMING_SNAKE_CASE = model(snake_case__ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("""Uploading PyTorch model and image processor to the hub...""" )
model.push_to_hub(F'nielsr/{model_name}' )
processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
UpperCAmelCase : Dict = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 267 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = RobertaPreLayerNormConfig.from_pretrained(
snake_case__ , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
A = torch.load(hf_hub_download(repo_id=snake_case__ , filename='pytorch_model.bin' ) )
A = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.' ):
A = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
A = tensor_value
A = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ )
model.save_pretrained(snake_case__ )
# convert tokenizer
A = AutoTokenizer.from_pretrained(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 74 | 0 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCamelCase =logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( _lowercase , _lowercase ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[Any] ,snake_case : bool ,snake_case : Optional[int] = None ,snake_case : Optional[int] = None ):
super().__init__()
SCREAMING_SNAKE_CASE =learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
SCREAMING_SNAKE_CASE =torch.zeros(A_ ,A_ )
else:
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =torch.nn.Parameter(A_ )
class a_ ( _lowercase ):
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
def __init__( self : int ,snake_case : VQModel ,snake_case : CLIPTextModel ,snake_case : CLIPTokenizer ,snake_case : TransformeraDModel ,snake_case : VQDiffusionScheduler ,snake_case : LearnedClassifierFreeSamplingEmbeddings ,):
super().__init__()
self.register_modules(
vqvae=A_ ,transformer=A_ ,text_encoder=A_ ,tokenizer=A_ ,scheduler=A_ ,learned_classifier_free_sampling_embeddings=A_ ,)
def _lowerCAmelCase ( self : Optional[int] ,snake_case : List[Any] ,snake_case : List[Any] ,snake_case : Any ):
SCREAMING_SNAKE_CASE =len(A_ ) if isinstance(A_ ,A_ ) else 1
# get prompt text embeddings
SCREAMING_SNAKE_CASE =self.tokenizer(
A_ ,padding='max_length' ,max_length=self.tokenizer.model_max_length ,return_tensors='pt' ,)
SCREAMING_SNAKE_CASE =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
SCREAMING_SNAKE_CASE =text_input_ids[:, : self.tokenizer.model_max_length]
SCREAMING_SNAKE_CASE =self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
SCREAMING_SNAKE_CASE =prompt_embeds / prompt_embeds.norm(dim=-1 ,keepdim=A_ )
# duplicate text embeddings for each generation per prompt
SCREAMING_SNAKE_CASE =prompt_embeds.repeat_interleave(A_ ,dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
SCREAMING_SNAKE_CASE =self.learned_classifier_free_sampling_embeddings.embeddings
SCREAMING_SNAKE_CASE =negative_prompt_embeds.unsqueeze(0 ).repeat(A_ ,1 ,1 )
else:
SCREAMING_SNAKE_CASE =[''] * batch_size
SCREAMING_SNAKE_CASE =text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE =self.tokenizer(
A_ ,padding='max_length' ,max_length=A_ ,truncation=A_ ,return_tensors='pt' ,)
SCREAMING_SNAKE_CASE =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
SCREAMING_SNAKE_CASE =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 ,keepdim=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE =negative_prompt_embeds.shape[1]
SCREAMING_SNAKE_CASE =negative_prompt_embeds.repeat(1 ,A_ ,1 )
SCREAMING_SNAKE_CASE =negative_prompt_embeds.view(batch_size * num_images_per_prompt ,A_ ,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE =torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Optional[Any] ,snake_case : Union[str, List[str]] ,snake_case : int = 100 ,snake_case : float = 5.0 ,snake_case : float = 1.0 ,snake_case : int = 1 ,snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,snake_case : Optional[torch.FloatTensor] = None ,snake_case : Optional[str] = "pil" ,snake_case : bool = True ,snake_case : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,snake_case : int = 1 ,):
if isinstance(A_ ,A_ ):
SCREAMING_SNAKE_CASE =1
elif isinstance(A_ ,A_ ):
SCREAMING_SNAKE_CASE =len(A_ )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(A_ )}' )
SCREAMING_SNAKE_CASE =batch_size * num_images_per_prompt
SCREAMING_SNAKE_CASE =guidance_scale > 1.0
SCREAMING_SNAKE_CASE =self._encode_prompt(A_ ,A_ ,A_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A_ ,A_ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(A_ )}.' )
# get the initial completely masked latents unless the user supplied it
SCREAMING_SNAKE_CASE =(batch_size, self.transformer.num_latent_pixels)
if latents is None:
SCREAMING_SNAKE_CASE =self.transformer.num_vector_embeds - 1
SCREAMING_SNAKE_CASE =torch.full(A_ ,A_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
f' {self.transformer.num_vector_embeds - 1} (inclusive).' )
SCREAMING_SNAKE_CASE =latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A_ ,device=self.device )
SCREAMING_SNAKE_CASE =self.scheduler.timesteps.to(self.device )
SCREAMING_SNAKE_CASE =latents
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the sample if we are doing classifier free guidance
SCREAMING_SNAKE_CASE =torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
SCREAMING_SNAKE_CASE =self.transformer(A_ ,encoder_hidden_states=A_ ,timestep=A_ ).sample
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =model_output.chunk(2 )
SCREAMING_SNAKE_CASE =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A_ ,dim=1 ,keepdim=A_ )
SCREAMING_SNAKE_CASE =self.truncate(A_ ,A_ )
# remove `log(0)`'s (`-inf`s)
SCREAMING_SNAKE_CASE =model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE =self.scheduler.step(A_ ,timestep=A_ ,sample=A_ ,generator=A_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A_ ,A_ ,A_ )
SCREAMING_SNAKE_CASE =self.vqvae.config.vq_embed_dim
SCREAMING_SNAKE_CASE =(batch_size, self.transformer.height, self.transformer.width, embedding_channels)
SCREAMING_SNAKE_CASE =self.vqvae.quantize.get_codebook_entry(A_ ,shape=A_ )
SCREAMING_SNAKE_CASE =self.vqvae.decode(A_ ,force_not_quantize=A_ ).sample
SCREAMING_SNAKE_CASE =(image / 2 + 0.5).clamp(0 ,1 )
SCREAMING_SNAKE_CASE =image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE =self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
def _lowerCAmelCase ( self : Any ,snake_case : torch.FloatTensor ,snake_case : float ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =torch.sort(A_ ,1 ,descending=A_ )
SCREAMING_SNAKE_CASE =torch.exp(A_ )
SCREAMING_SNAKE_CASE =sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
SCREAMING_SNAKE_CASE =torch.full_like(keep_mask[:, 0:1, :] ,A_ )
SCREAMING_SNAKE_CASE =torch.cat((all_true, keep_mask) ,dim=1 )
SCREAMING_SNAKE_CASE =keep_mask[:, :-1, :]
SCREAMING_SNAKE_CASE =keep_mask.gather(1 ,indices.argsort(1 ) )
SCREAMING_SNAKE_CASE =log_p_x_0.clone()
SCREAMING_SNAKE_CASE =-torch.inf # -inf = log(0)
return rv
| 334 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''roformer'''
def __init__( self : Tuple ,A_ : Optional[int]=5_0000 ,A_ : Tuple=None ,A_ : Optional[Any]=768 ,A_ : Dict=12 ,A_ : Optional[int]=12 ,A_ : Union[str, Any]=3072 ,A_ : Dict="gelu" ,A_ : Dict=0.1 ,A_ : List[Any]=0.1 ,A_ : List[Any]=1536 ,A_ : List[str]=2 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : Optional[int]=0 ,A_ : List[str]=False ,A_ : Tuple=True ,**A_ : List[str] ,) -> Dict:
super().__init__(pad_token_id=A_ ,**A_ )
A = vocab_size
A = hidden_size if embedding_size is None else embedding_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = rotary_value
A = use_cache
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 74 | 0 |
"""simple docstring"""
def UpperCAmelCase ( UpperCamelCase__ = 200 ):
"""simple docstring"""
A__ = [1, 2, 5, 10, 20, 50, 100, 200]
A__ = [0] * (pence + 1)
A__ = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(snake_case__ , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 221 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _snake_case ( snake_case__ : Dict ):
A = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def _snake_case ( snake_case__ : int ):
A , A = emb.weight.shape
A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
A = emb.weight.data
return lin_layer
def _snake_case ( snake_case__ : List[str] , snake_case__ : Any="facebook/mbart-large-en-ro" , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False ):
A = torch.load(snake_case__ , map_location='cpu' )['model']
remove_ignore_keys_(snake_case__ )
A = state_dict['encoder.embed_tokens.weight'].shape[0]
A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ )
if mbart_aa and finetuned:
A = 'relu'
A = state_dict['decoder.embed_tokens.weight']
A = MBartForConditionalGeneration(snake_case__ )
model.model.load_state_dict(snake_case__ )
if finetuned:
A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
_lowercase = parser.parse_args()
_lowercase = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path) | 74 | 0 |
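For reference, a runnable version of the coin-counting snippet in the row above with the obfuscated names restored; the function name `count_ways` is illustrative. Iterating coins in the outer loop counts each combination exactly once (order-independent), which is the standard unbounded-knapsack formulation.

def count_ways(pence: int = 200, coins=(1, 2, 5, 10, 20, 50, 100, 200)) -> int:
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # one way to make 0 pence: use no coins
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]

assert count_ways(200) == 73682  # matches the snippet's own assertion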
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE : int = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def lowercase ( _snake_case : list[list[int]] , _snake_case : list[int] , _snake_case : list[int] , _snake_case : int , _snake_case : list[list[int]] , ) ->List[str]:
"""simple docstring"""
__snake_case : Dict = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the reference grid
__snake_case : str = 1
__snake_case : List[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : str = init[1]
__snake_case : List[str] = 0
__snake_case : Union[str, Any] = g + heuristic[x][y] # cost from starting cell to destination cell
__snake_case : Optional[Any] = [[f, g, x, y]]
__snake_case : Tuple = False # flag that is set when search is complete
__snake_case : Dict = False # flag set if we can't find expand
while not found and not resign:
if len(snake_case__ ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : str = cell.pop()
__snake_case : Tuple = next_cell[2]
__snake_case : Union[str, Any] = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Any = True
else:
for i in range(len(snake_case__ ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Dict = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[int] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : List[Any] = 1
__snake_case : int = i
__snake_case : Dict = []
__snake_case : Tuple = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Dict = x - DIRECTIONS[action[x][y]][0]
__snake_case : Union[str, Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : List[str] = xa
__snake_case : Union[str, Any] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(snake_case__ ) ):
path.append(invpath[len(snake_case__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = [
[0, 1, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
SCREAMING_SNAKE_CASE : Optional[int] = [0, 0]
# all coordinates are given in format [y,x]
SCREAMING_SNAKE_CASE : List[Any] = [len(grid) - 1, len(grid[0]) - 1]
SCREAMING_SNAKE_CASE : Dict = 1
# the cost map which pushes the path closer to the goal
SCREAMING_SNAKE_CASE : Optional[int] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
SCREAMING_SNAKE_CASE : str = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
SCREAMING_SNAKE_CASE : Union[str, Any] = 99
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 102 |
"""simple docstring"""
import argparse
import struct
import unittest
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Tuple ,A_ : bytes ) -> None:
A = data
# Initialize hash values
A = [
0X6_A_0_9_E_6_6_7,
0XB_B_6_7_A_E_8_5,
0X3_C_6_E_F_3_7_2,
0XA_5_4_F_F_5_3_A,
0X5_1_0_E_5_2_7_F,
0X9_B_0_5_6_8_8_C,
0X1_F_8_3_D_9_A_B,
0X5_B_E_0_C_D_1_9,
]
# Initialize round constants
A = [
0X4_2_8_A_2_F_9_8,
0X7_1_3_7_4_4_9_1,
0XB_5_C_0_F_B_C_F,
0XE_9_B_5_D_B_A_5,
0X3_9_5_6_C_2_5_B,
0X5_9_F_1_1_1_F_1,
0X9_2_3_F_8_2_A_4,
0XA_B_1_C_5_E_D_5,
0XD_8_0_7_A_A_9_8,
0X1_2_8_3_5_B_0_1,
0X2_4_3_1_8_5_B_E,
0X5_5_0_C_7_D_C_3,
0X7_2_B_E_5_D_7_4,
0X8_0_D_E_B_1_F_E,
0X9_B_D_C_0_6_A_7,
0XC_1_9_B_F_1_7_4,
0XE_4_9_B_6_9_C_1,
0XE_F_B_E_4_7_8_6,
0X0_F_C_1_9_D_C_6,
0X2_4_0_C_A_1_C_C,
0X2_D_E_9_2_C_6_F,
0X4_A_7_4_8_4_A_A,
0X5_C_B_0_A_9_D_C,
0X7_6_F_9_8_8_D_A,
0X9_8_3_E_5_1_5_2,
0XA_8_3_1_C_6_6_D,
0XB_0_0_3_2_7_C_8,
0XB_F_5_9_7_F_C_7,
0XC_6_E_0_0_B_F_3,
0XD_5_A_7_9_1_4_7,
0X0_6_C_A_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_B_7_0_A_8_5,
0X2_E_1_B_2_1_3_8,
0X4_D_2_C_6_D_F_C,
0X5_3_3_8_0_D_1_3,
0X6_5_0_A_7_3_5_4,
0X7_6_6_A_0_A_B_B,
0X8_1_C_2_C_9_2_E,
0X9_2_7_2_2_C_8_5,
0XA_2_B_F_E_8_A_1,
0XA_8_1_A_6_6_4_B,
0XC_2_4_B_8_B_7_0,
0XC_7_6_C_5_1_A_3,
0XD_1_9_2_E_8_1_9,
0XD_6_9_9_0_6_2_4,
0XF_4_0_E_3_5_8_5,
0X1_0_6_A_A_0_7_0,
0X1_9_A_4_C_1_1_6,
0X1_E_3_7_6_C_0_8,
0X2_7_4_8_7_7_4_C,
0X3_4_B_0_B_C_B_5,
0X3_9_1_C_0_C_B_3,
0X4_E_D_8_A_A_4_A,
0X5_B_9_C_C_A_4_F,
0X6_8_2_E_6_F_F_3,
0X7_4_8_F_8_2_E_E,
0X7_8_A_5_6_3_6_F,
0X8_4_C_8_7_8_1_4,
0X8_C_C_7_0_2_0_8,
0X9_0_B_E_F_F_F_A,
0XA_4_5_0_6_C_E_B,
0XB_E_F_9_A_3_F_7,
0XC_6_7_1_7_8_F_2,
]
A = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : bytes ) -> bytes:
A = B'\x80' + (B'\x00' * (63 - (len(A_ ) + 8) % 64))
A = struct.pack('>Q' ,(len(A_ ) * 8) )
return data + padding + big_endian_integer
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
# Convert into blocks of 64 bytes
A = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A = list(struct.unpack('>16L' ,A_ ) )
# add 48 0-ed integers
words += [0] * 48
A , A , A , A , A , A , A , A = self.hashes
for index in range(0 ,64 ):
if index > 15:
                    # modify the zeroed indexes at the end of the array
A = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
A = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
A = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_0_0_0_0_0_0_0_0
# Compression
A = self.ror(A_ ,6 ) ^ self.ror(A_ ,11 ) ^ self.ror(A_ ,25 )
A = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g)
A = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0_0_0_0_0_0_0_0
A = self.ror(A_ ,2 ) ^ self.ror(A_ ,13 ) ^ self.ror(A_ ,22 )
A = (a & b) ^ (a & c) ^ (b & c)
A = (sa + maj) % 0X1_0_0_0_0_0_0_0_0
A , A , A , A , A , A , A , A = (
g,
f,
e,
((d + tempa) % 0X1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0),
)
A = [a, b, c, d, e, f, g, h]
# Modify final values
A = [
((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
A = ''.join([hex(A_ )[2:].zfill(8 ) for value in self.hashes] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : int ) -> int:
return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> None:
import hashlib
A = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(A_ ).hash ,hashlib.shaaaa(A_ ).hexdigest() )
def _snake_case ( ):
import doctest
doctest.testmod()
A = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
A = parser.parse_args()
A = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
A = f.read()
else:
A = bytes(snake_case__ , 'utf-8' )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
main() | 74 | 0 |
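Two small helpers from the SHA-256 implementation above, rewritten without the mangled names so the bit-level steps are visible: the message padding (append 0x80, zero-fill to 56 mod 64 bytes, then the original bit length as a big-endian u64) and the 32-bit right rotation used in every compression round.

import hashlib
import struct

def sha256_pad(data: bytes) -> bytes:
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    big_endian_length = struct.pack(">Q", len(data) * 8)
    padded = data + padding + big_endian_length
    assert len(padded) % 64 == 0  # SHA-256 processes 64-byte blocks
    return padded

def ror(value: int, rotations: int) -> int:
    # 32-bit right rotation: the high bits wrap around into the low bits.
    return (0xFFFFFFFF & (value << (32 - rotations))) | (value >> rotations)

# The unit test above checks the pure-Python digest against hashlib's:
print(hashlib.sha256(b"Test String").hexdigest())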
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :int = 9
__UpperCamelCase :int = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__UpperCamelCase :str = kruskal(snake_case__ , snake_case__ )
__UpperCamelCase :Tuple = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(snake_case__ ) == sorted(snake_case__ )
| 43 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowercase = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''DeiTFeatureExtractor''']
_lowercase = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 74 | 0 |
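The Kruskal test above imports `kruskal` from `graphs.minimum_spanning_tree_kruskal`, which is not shown. Here is a minimal union-find sketch consistent with how the test calls it (edges as [u, v, weight] triples, MST returned as an edge list); the real module may differ in details.

def kruskal(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):  # cheapest edges first
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # adding the edge does not create a cycle
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst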
import warnings
from .generation import TFGenerationMixin
class __magic_name__ ( _lowercase):
warnings.warn(
"Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
"be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , _lowercase , )
| 146 |
"""simple docstring"""
from __future__ import annotations
import requests
def _snake_case ( snake_case__ : str ):
A = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(snake_case__ ).json()
def _snake_case ( snake_case__ : int = 10 ):
A = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
A = requests.get(snake_case__ ).json()[:max_stories]
return [get_hackernews_story(snake_case__ ) for story_id in story_ids]
def _snake_case ( snake_case__ : int = 10 ):
A = hackernews_top_stories(snake_case__ )
return "\n".join('* [{title}]({url})'.format(**snake_case__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown()) | 74 | 0 |
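An offline usage sketch for the Hacker News helpers above: the story dicts are stand-ins containing only the fields the markdown formatter consumes (`title` and `url`), so no network call is made.

stories = [
    {"title": "Show HN: Example", "url": "https://example.com"},
    {"title": "Ask HN: Example", "url": "https://example.org"},
]
# Same formatting expression the snippet uses in hackernews_top_stories_as_markdown.
markdown = "\n".join("* [{title}]({url})".format(**story) for story in stories)
print(markdown)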
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ):
"""simple docstring"""
self.assertEqual(len(A_ ) , len(A_ ) )
for a, b in zip(A_ , A_ ):
self.assertAlmostEqual(A_ , A_ , delta=A_ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(A_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = None
ops.enable_eager_execution_internal()
UpperCAmelCase__ = tf.config.list_physical_devices("""CPU""" )
if len(A_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
UpperCAmelCase__ = tf.config.list_logical_devices(device_type="""CPU""" )
UpperCAmelCase__ = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
UpperCAmelCase__ = GradientAccumulator()
UpperCAmelCase__ = tf.Variable([4.0, 3.0] )
UpperCAmelCase__ , UpperCAmelCase__ = create_optimizer(5E-5 , 10 , 5 )
UpperCAmelCase__ = tf.Variable([0.0, 0.0] , trainable=A_ )
def accumulate_on_replica(_UpperCAmelCase : int ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_UpperCAmelCase : Dict , _UpperCAmelCase : str ):
with strategy.scope():
UpperCAmelCase__ = strategy.experimental_local_results(A_ )
local_variables[0].assign(A_ )
local_variables[1].assign(A_ )
strategy.run(A_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(A_ )
def _check_local_values(_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] ):
UpperCAmelCase__ = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , A_ , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , A_ , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 346 |
"""simple docstring"""
from string import ascii_uppercase
_lowercase = {char: i for i, char in enumerate(ascii_uppercase)}
_lowercase = dict(enumerate(ascii_uppercase))
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = len(snake_case__ )
A = 0
while True:
if x == i:
A = 0
if len(snake_case__ ) == len(snake_case__ ):
break
key += key[i]
i += 1
return key
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = ''
A = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
A = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = ''
A = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
A = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def _snake_case ( ):
A = 'THE GERMAN ATTACK'
A = 'SECRET'
A = generate_key(snake_case__ , snake_case__ )
A = cipher_text(snake_case__ , snake_case__ )
print(F'Encrypted Text = {s}' )
print(F'Original Text = {original_text(snake_case__ , snake_case__ )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 74 | 0 |
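A cleaned-up round-trip version of the Vigenère-style cipher above (the snippet subtracts the key on encryption and adds it back on decryption, passing spaces through without consuming a key character); names like `generate_key` mirror the snippet, while `shift` is illustrative.

from string import ascii_uppercase

CHAR_TO_INT = {char: i for i, char in enumerate(ascii_uppercase)}
INT_TO_CHAR = dict(enumerate(ascii_uppercase))

def generate_key(message: str, key: str) -> str:
    out = list(key)
    i = 0
    while len(out) < len(message):  # cycle the key's own characters
        out.append(out[i])
        i += 1
    return "".join(out)

def shift(message: str, key: str, sign: int) -> str:
    result, i = [], 0
    for ch in message:
        if ch == " ":
            result.append(" ")
        else:
            result.append(INT_TO_CHAR[(CHAR_TO_INT[ch] + sign * CHAR_TO_INT[key[i]]) % 26])
            i += 1
    return "".join(result)

key = generate_key("THE GERMAN ATTACK", "SECRET")
cipher = shift("THE GERMAN ATTACK", key, sign=-1)          # encrypt
assert shift(cipher, key, sign=+1) == "THE GERMAN ATTACK"  # decrypt round-trips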
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase ( ):
"""simple docstring"""
__A = 2
while True:
if is_prime(snake_case__ ):
yield num
num += 1
def lowerCAmelCase ( __UpperCamelCase = 2_0_0_0_0_0_0 ):
"""simple docstring"""
return sum(takewhile(lambda __UpperCamelCase : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 266 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ) -> Optional[Any]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
A = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
A = 'sgugger/tiny-distilbert-classification'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,only_pretrain_model=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,torchscript=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,fpaa=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
# set architectures equal to `None`
A = None
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=A_ ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = 'sshleifer/tinier_bart'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = 'sshleifer/tinier_bart'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
A = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,save_to_csv=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(A_ ,'inf_time.csv' ) ,train_memory_csv_file=os.path.join(A_ ,'train_mem.csv' ) ,inference_memory_csv_file=os.path.join(A_ ,'inf_mem.csv' ) ,train_time_csv_file=os.path.join(A_ ,'train_time.csv' ) ,env_info_csv_file=os.path.join(A_ ,'env.csv' ) ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
benchmark.run()
self.assertTrue(Path(os.path.join(A_ ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'env.csv' ) ).exists() )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(A_ : Optional[int] ):
self.assertTrue(hasattr(A_ ,'sequential' ) )
self.assertTrue(hasattr(A_ ,'cumulative' ) )
self.assertTrue(hasattr(A_ ,'current' ) )
self.assertTrue(hasattr(A_ ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(A_ ,'log.txt' ) ,log_print=A_ ,trace_memory_line_by_line=A_ ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A_ ,'log.txt' ) ).exists() ) | 74 | 0 |
from manim import *
class a__ ( _lowercase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
A__ = Rectangle(height=0.5 , width=0.5 )
A__ = Rectangle(height=0.25 , width=0.25 )
A__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A__ = [mem.copy() for i in range(6 )]
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*A_ ).arrange(A_ , buff=0 )
A__ = VGroup(*A_ ).arrange(A_ , buff=0 )
A__ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
A__ = Text("CPU" , font_size=24 )
A__ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
A__ = [mem.copy() for i in range(4 )]
A__ = VGroup(*A_ ).arrange(A_ , buff=0 )
A__ = Text("GPU" , font_size=24 )
A__ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*A_ ).arrange(A_ , buff=0 )
A__ = Text("Model" , font_size=24 )
A__ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
A__ = []
A__ = []
A__ = []
for i, rect in enumerate(A_ ):
rect.set_stroke(A_ )
A__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=A_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=A_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=A_ , buff=0.0 )
self.add(A_ )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ , *A_ )
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*A_ ).arrange(A_ , buff=0 )
A__ = Text("Loaded Checkpoint" , font_size=24 )
A__ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(A_ )
A__ = []
A__ = []
for i, rect in enumerate(A_ ):
A__ = fill.copy().set_fill(A_ , opacity=0.7 )
target.move_to(A_ )
ckpt_arr.append(A_ )
A__ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
A__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
A__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
A__ = MarkupText(
            F'Based on the passed-in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
A__ = [meta_mem.copy() for i in range(6 )]
A__ = [meta_mem.copy() for i in range(6 )]
A__ = VGroup(*A_ ).arrange(A_ , buff=0 )
A__ = VGroup(*A_ ).arrange(A_ , buff=0 )
A__ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
A__ = Text("Disk" , font_size=24 )
A__ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(A_ , run_time=3 ) , Write(A_ , run_time=1 ) , Create(A_ , run_time=1 ) )
A__ = []
for i, rect in enumerate(A_ ):
A__ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(A_ , run_time=1.5 ) )
self.play(*A_ )
self.play(FadeOut(A_ ) )
A__ = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
self.play(
FadeOut(A_ , A_ , *A_ , *A_ ) , )
self.wait()
| 68 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_lowercase = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: str
_lowerCamelCase: Optional[str] = None
_lowerCamelCase: Optional[Union[str, int]] = None
_lowerCamelCase: Optional[Union[str, int]] = None
_lowerCamelCase: Optional[Union[str, int]] = None
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
A , A , A = _str_to_version_tuple(self.version_str )
def __repr__( self : Optional[int] ) -> Dict:
return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return self.major, self.minor, self.patch
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple ) -> Union[str, Any]:
if isinstance(A_ ,A_ ):
return Version(A_ )
elif isinstance(A_ ,A_ ):
return other
raise TypeError(F'{other} (type {type(A_ )}) cannot be compared to version.' )
def __eq__( self : List[Any] ,A_ : Dict ) -> Any:
try:
A = self._validate_operand(A_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : List[Any] ,A_ : Optional[int] ) -> Tuple:
A = self._validate_operand(A_ )
return self.tuple < other.tuple
def __hash__( self : Union[str, Any] ) -> Union[str, Any]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,A_ : List[str] ) -> List[str]:
A = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.version_str
def _snake_case ( snake_case__ : List[str] ):
A = _VERSION_REG.match(snake_case__ )
if not res:
raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
return tuple(int(snake_case__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def _snake_case ( snake_case__ : str ):
return ".".join(str(snake_case__ ) for v in version_tuple ) | 74 | 0 |
"""simple docstring"""
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
lowerCAmelCase = re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class A_ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =_str_to_version_tuple(self.version_str )
def __repr__( self :Optional[int] ):
"""simple docstring"""
return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""
@property
def UpperCAmelCase__ ( self :Optional[int] ):
"""simple docstring"""
return self.major, self.minor, self.patch
def UpperCAmelCase__ ( self :Optional[int] , lowerCamelCase_ :Tuple ):
"""simple docstring"""
if isinstance(A_ , A_ ):
return Version(A_ )
elif isinstance(A_ , A_ ):
return other
raise TypeError(f"""{other} (type {type(A_ )}) cannot be compared to version.""" )
def __eq__( self :List[Any] , lowerCamelCase_ :Dict ):
"""simple docstring"""
try:
lowerCamelCase__ : Union[str, Any] =self._validate_operand(A_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self :List[Any] , lowerCamelCase_ :Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : List[str] =self._validate_operand(A_ )
return self.tuple < other.tuple
def __hash__( self :Union[str, Any] ):
"""simple docstring"""
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def UpperCAmelCase__ ( cls :Any , lowerCamelCase_ :List[str] ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] ={f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def UpperCAmelCase__ ( self :Optional[int] ):
"""simple docstring"""
return self.version_str
def lowerCAmelCase_ ( snake_case_ : List[str] ) ->Tuple:
lowerCamelCase__ : Union[str, Any] =_VERSION_REG.match(snake_case__ )
if not res:
raise ValueError(f"""Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.""" )
return tuple(int(snake_case__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def lowerCAmelCase_ ( snake_case_ : str ) ->List[str]:
return ".".join(str(snake_case__ ) for v in version_tuple ) | 126 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_lowercase = NewType('''DataClass''', Any)
_lowercase = NewType('''DataClassType''', Any)
def _snake_case ( snake_case__ : Tuple ):
if isinstance(snake_case__ , snake_case__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def _snake_case ( snake_case__ : list ):
A = {str(snake_case__ ): choice for choice in choices}
return lambda snake_case__ : str_to_choice.get(snake_case__ , snake_case__ )
def _snake_case ( *,
snake_case__ : Union[str, List[str]] = None , snake_case__ : str = None , snake_case__ : Any = dataclasses.MISSING , snake_case__ : Callable[[], Any] = dataclasses.MISSING , snake_case__ : dict = None , **snake_case__ : Any , ):
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
A = {}
if aliases is not None:
A = aliases
if help is not None:
A = help
return dataclasses.field(metadata=snake_case__ , default=snake_case__ , default_factory=snake_case__ , **snake_case__ )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Iterable[DataClassType]
def __init__( self : List[str] ,A_ : Union[DataClassType, Iterable[DataClassType]] ,**A_ : Any ) -> Optional[int]:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
A = ArgumentDefaultsHelpFormatter
super().__init__(**A_ )
if dataclasses.is_dataclass(A_ ):
A = [dataclass_types]
A = list(A_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(A_ )
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ,A_ : dataclasses.Field ) -> Optional[Any]:
A = F'--{field.name}'
A = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type ,A_ ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
A = kwargs.pop('aliases' ,[] )
if isinstance(A_ ,A_ ):
A = [aliases]
A = getattr(field.type ,'__origin__' ,field.type )
if origin_type is Union or (hasattr(A_ ,'UnionType' ) and isinstance(A_ ,types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(A_ ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F' Problem encountered in field \'{field.name}\'.' )
if type(A_ ) not in field.type.__args__:
# filter `str` in Union
A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
A = getattr(field.type ,'__origin__' ,field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
A = (
field.type.__args__[0] if isinstance(A_ ,field.type.__args__[1] ) else field.type.__args__[1]
)
A = getattr(field.type ,'__origin__' ,field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
A = {}
if origin_type is Literal or (isinstance(field.type ,A_ ) and issubclass(field.type ,A_ )):
if origin_type is Literal:
A = field.type.__args__
else:
A = [x.value for x in field.type]
A = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
A = field.default
else:
A = True
elif field.type is bool or field.type == Optional[bool]:
                # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
A = copy(A_ )
# Hack because type=bool in argparse does not behave as we want.
A = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
A = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
A = default
# This tells argparse we accept 0 or 1 value after --field_name
A = '?'
# This is the value that will get picked if we do --field_name (without value)
A = True
elif isclass(A_ ) and issubclass(A_ ,A_ ):
A = field.type.__args__[0]
A = '+'
if field.default_factory is not dataclasses.MISSING:
A = field.default_factory()
elif field.default is dataclasses.MISSING:
A = True
else:
A = field.type
if field.default is not dataclasses.MISSING:
A = field.default
elif field.default_factory is not dataclasses.MISSING:
A = field.default_factory()
else:
A = True
parser.add_argument(A_ ,*A_ ,**A_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
A = False
parser.add_argument(F'--no_{field.name}' ,action='store_false' ,dest=field.name ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : DataClassType ) -> List[Any]:
if hasattr(A_ ,'_argument_group_name' ):
A = self.add_argument_group(dtype._argument_group_name )
else:
A = self
try:
A = get_type_hints(A_ )
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
                    'removing the line `from __future__ import annotations`, which opts into Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(A_ ):
A = '.'.join(map(A_ ,sys.version_info[:3] ) )
raise RuntimeError(
F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
                    'the line `from __future__ import annotations`, which opts into union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(A_ ):
if not field.init:
continue
A = type_hints[field.name]
self._parse_dataclass_field(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Any=None ,A_ : int=False ,A_ : Any=True ,A_ : List[str]=None ,A_ : Union[str, Any]=None ,) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
A = []
if args_filename:
args_files.append(Path(A_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
A = ArgumentParser()
args_file_parser.add_argument(A_ ,type=A_ ,action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
A , A = args_file_parser.parse_known_args(args=A_ )
A = vars(A_ ).get(args_file_flag.lstrip('-' ) ,A_ )
if cmd_args_file_paths:
args_files.extend([Path(A_ ) for p in cmd_args_file_paths] )
A = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
A = file_args + args if args is not None else file_args + sys.argv[1:]
A , A = self.parse_known_args(args=A_ )
A = []
for dtype in self.dataclass_types:
A = {f.name for f in dataclasses.fields(A_ ) if f.init}
A = {k: v for k, v in vars(A_ ).items() if k in keys}
for k in keys:
delattr(A_ ,A_ )
A = dtype(**A_ )
outputs.append(A_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(A_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
return (*outputs,)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Dict[str, Any] ,A_ : bool = False ) -> Tuple[DataClass, ...]:
A = set(args.keys() )
A = []
for dtype in self.dataclass_types:
A = {f.name for f in dataclasses.fields(A_ ) if f.init}
A = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
A = dtype(**A_ )
outputs.append(A_ )
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(A_ )}' )
return tuple(A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]:
with open(Path(A_ ) ,encoding='utf-8' ) as open_json_file:
A = json.loads(open_json_file.read() )
A = self.parse_dict(A_ ,allow_extra_keys=A_ )
return tuple(A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]:
A = self.parse_dict(yaml.safe_load(Path(A_ ).read_text() ) ,allow_extra_keys=A_ )
return tuple(A_ ) | 74 | 0 |
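The argument parser above wires boolean dataclass fields through a truthy-string converter with `nargs='?'` and `const=True`, so a bare `--flag` means True while `--flag no` means False. A standalone reproduction of just that wiring, using plain argparse with no transformers dependency:

from argparse import ArgumentParser, ArgumentTypeError

def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if v.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise ArgumentTypeError(
        f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
    )

parser = ArgumentParser()
# 0-or-1 values after the flag; a bare `--fp16` picks up const=True.
parser.add_argument("--fp16", type=string_to_bool, nargs="?", const=True, default=False)
assert parser.parse_args(["--fp16"]).fp16 is True
assert parser.parse_args(["--fp16", "no"]).fp16 is False
assert parser.parse_args([]).fp16 is False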
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = KandinskyVaaImgaImgPipeline
lowerCamelCase__ : Tuple = ['''image_embeds''', '''negative_image_embeds''', '''image''']
lowerCamelCase__ : Any = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
lowerCamelCase__ : int = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowerCamelCase__ : Dict = False
@property
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return 32
@property
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
return 32
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return self.time_input_dim
@property
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
a__ ={
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
a__ =UNetaDConditionModel(**A_ )
return model
@property
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
a__ =VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
a__ =self.dummy_unet
a__ =self.dummy_movq
a__ ={
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
a__ =DDIMScheduler(**A_ )
a__ ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _UpperCAmelCase ( self, lowercase_, lowercase_=0 ) -> Optional[int]:
"""simple docstring"""
a__ =floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(A_ ) ).to(A_ )
a__ =floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
A_ )
# create init_image
a__ =floats_tensor((1, 3, 64, 64), rng=random.Random(A_ ) ).to(A_ )
a__ =image.cpu().permute(0, 2, 3, 1 )[0]
a__ =Image.fromarray(np.uinta(A_ ) ).convert('''RGB''' ).resize((256, 256) )
if str(A_ ).startswith('''mps''' ):
a__ =torch.manual_seed(A_ )
else:
a__ =torch.Generator(device=A_ ).manual_seed(A_ )
a__ ={
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
a__ ='''cpu'''
a__ =self.get_dummy_components()
a__ =self.pipeline_class(**A_ )
a__ =pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
a__ =pipe(**self.get_dummy_inputs(A_ ) )
a__ =output.images
a__ =pipe(
**self.get_dummy_inputs(A_ ), return_dict=A_, )[0]
a__ =image[0, -3:, -3:, -1]
a__ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__ =np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
a__ =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
a__ =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
a__ ='''A red cartoon frog, 4k'''
a__ =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.floataa )
pipe_prior.to(A_ )
a__ =KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''', torch_dtype=torch.floataa )
a__ =pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
generator =torch.Generator(device='''cpu''' ).manual_seed(0 )
image_embeds, negative_image_embeds =pipe_prior(
A_, generator=generator, num_inference_steps=5, negative_prompt='''''', ).to_tuple()
output =pipeline(
image=A_, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='''np''', )
image =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_, A_ )
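# A minimal sketch of the two-stage Kandinsky 2.2 img2img flow that the slow test
# above exercises. Illustrative only (not part of the test suite); the checkpoint
# ids are the ones used above, the prompt and init image are placeholders:
#
#   prior = KandinskyV22PriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16).to("cuda")
#   decoder = KandinskyV22Img2ImgPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16).to("cuda")
#   image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k").to_tuple()
#   frames = decoder(image=init_image, image_embeds=image_embeds,
#                    negative_image_embeds=negative_image_embeds, strength=0.2).images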
| 188 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase = 16
_lowercase = 32
def _snake_case ( snake_case__ : Accelerator , snake_case__ : int = 16 , snake_case__ : str = "bert-base-cased" ):
A = AutoTokenizer.from_pretrained(snake_case__ )
A = load_dataset('glue' , 'mrpc' )
def tokenize_function(snake_case__ : Dict ):
# max_length=None => use the model max length (it's actually the default)
A = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
tokenized_datasets = datasets.map(
tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(examples ):
    # On TPU it's best to pad everything to the same length or training will be very slow.
    if accelerator.distributed_type == DistributedType.TPU:
        return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
    return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
eval_dataloader = DataLoader(
tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
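# Quick illustration of the two padding modes chosen in `collate_fn` above
# (a sketch with already-tokenized features; not executed by this script):
#
#   features = [{"input_ids": [101, 2023, 102]}, {"input_ids": [101, 102]}]
#   tokenizer.pad(features, padding="longest", return_tensors="pt")    # pads to 3 tokens
#   tokenizer.pad(features, padding="max_length", max_length=128,
#                 return_tensors="pt")                                 # static 128, TPU-friendly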
def training_function ( config , args ):
# Initialize accelerator
accelerator = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config['lr']
num_epochs = int(config['num_epochs'] )
seed = int(config['seed'] )
batch_size = int(config['batch_size'] )
model_name_or_path = args.model_name_or_path
set_seed(seed )
train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
# Instantiate optimizer
A = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A = optimizer_cls(params=model.parameters() , lr=snake_case__ )
if accelerator.state.deepspeed_plugin is not None:
A = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
A = 1
A = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , )
else:
A = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A , A , A , A , A = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# We need to keep track of how many total steps we have iterated over
A = 0
# We also need to keep track of the stating epoch so files are named properly
A = 0
# Now we train the model
metric = evaluate.load('glue' , 'mrpc' )
best_performance = 0
performance_metric = {}
for epoch in range(snake_case__ , snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
A = model(**snake_case__ )
A = outputs.loss
A = loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
A = 0
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A = model(**snake_case__ )
A = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
A , A = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case__ ) - 1:
A = predictions[: len(eval_dataloader.dataset ) - samples_seen]
A = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , eval_metric )
performance_metric[F'epoch-{epoch}'] = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
    best_performance = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(performance_metric , f )
def main ( ):
A = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=snake_case__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=snake_case__ , )
parser.add_argument(
'--output_dir' , type=snake_case__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=snake_case__ , default=snake_case__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=snake_case__ , default=3 , help='Number of train epochs.' , )
A = parser.parse_args()
A = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(snake_case__ , snake_case__ )
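# A plausible launch command for this script (illustrative; the file name is
# hypothetical and a DeepSpeed config is assumed to have been created beforehand
# with `accelerate config`; flag names match the argparse definitions above):
#
#   accelerate launch peak_performance_metrics.py \
#       --model_name_or_path bert-base-cased \
#       --output_dir ./outputs \
#       --num_epochs 3 \
#       --performance_lower_bound 0.80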
if __name__ == "__main__":
main()
| 74 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components ( self : List[Any] ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE = DDIMScheduler()
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE = CLIPTextModel(A_ )
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def get_dummy_inputs ( self : Tuple , device : Union[str, Any] , seed : List[Any]=0 ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(A_ )
__SCREAMING_SNAKE_CASE = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline(**A_ )
__SCREAMING_SNAKE_CASE = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(A_ )
__SCREAMING_SNAKE_CASE = sd_pipe(**A_ ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline(**A_ )
__SCREAMING_SNAKE_CASE = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(A_ )
__SCREAMING_SNAKE_CASE = """french fries"""
__SCREAMING_SNAKE_CASE = sd_pipe(**A_ , negative_prompt=A_ )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline(**A_ )
__SCREAMING_SNAKE_CASE = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(A_ )
__SCREAMING_SNAKE_CASE = sd_pipe(**A_ , view_batch_size=2 )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline(**A_ )
__SCREAMING_SNAKE_CASE = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(A_ )
__SCREAMING_SNAKE_CASE = sd_pipe(**A_ ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = PNDMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=A_ )
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline(**A_ )
__SCREAMING_SNAKE_CASE = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(A_ )
__SCREAMING_SNAKE_CASE = sd_pipe(**A_ ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def tearDown ( self : int ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs ( self : Tuple , seed : Union[str, Any]=0 ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(A_ )
__SCREAMING_SNAKE_CASE = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """stabilityai/stable-diffusion-2-base"""
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(A_ , subfolder="""scheduler""" )
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline.from_pretrained(A_ , scheduler=A_ , safety_checker=None )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = self.get_inputs()
__SCREAMING_SNAKE_CASE = pipe(**A_ ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
__SCREAMING_SNAKE_CASE = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Any ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=None )
__SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = self.get_inputs()
__SCREAMING_SNAKE_CASE = pipe(**A_ ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
__SCREAMING_SNAKE_CASE = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 0
def callback_fn(step: int , timestep: int , latents: torch.FloatTensor ) -> None:
    callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__SCREAMING_SNAKE_CASE = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
__SCREAMING_SNAKE_CASE = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__SCREAMING_SNAKE_CASE = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
__SCREAMING_SNAKE_CASE = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
callback_fn.has_been_called = False
__SCREAMING_SNAKE_CASE = """stabilityai/stable-diffusion-2-base"""
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(A_ , subfolder="""scheduler""" )
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline.from_pretrained(A_ , scheduler=A_ , safety_checker=None )
__SCREAMING_SNAKE_CASE = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = self.get_inputs()
pipe(**A_ , callback=A_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCAmelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE = """stabilityai/stable-diffusion-2-base"""
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(A_ , subfolder="""scheduler""" )
__SCREAMING_SNAKE_CASE = StableDiffusionPanoramaPipeline.from_pretrained(A_ , scheduler=A_ , safety_checker=None )
__SCREAMING_SNAKE_CASE = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE = self.get_inputs()
__SCREAMING_SNAKE_CASE = pipe(**A_ )
__SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
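# For reference, the user-facing call the tests above mirror (a sketch, not part
# of the test class; the model id and default panorama size come from the slow
# tests above):
#
#   scheduler = DDIMScheduler.from_pretrained(
#       "stabilityai/stable-diffusion-2-base", subfolder="scheduler")
#   pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-base", scheduler=scheduler,
#       safety_checker=None).to("cuda")
#   panorama = pipe("a photo of the dolomites", height=512, width=2048).images[0]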
| 267 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : str ,A_ : Dict=13 ,A_ : str=7 ,A_ : str=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=False ,A_ : str=False ,A_ : Tuple=False ,A_ : str=2 ,A_ : Optional[int]=99 ,A_ : Union[str, Any]=0 ,A_ : Optional[Any]=32 ,A_ : Optional[int]=5 ,A_ : Optional[int]=4 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=0.1 ,A_ : Union[str, Any]=512 ,A_ : Union[str, Any]=2 ,A_ : Any=0.02 ,A_ : List[str]=2 ,A_ : int=4 ,A_ : int="last" ,A_ : Dict=True ,A_ : Union[str, Any]=None ,A_ : Any=0 ,) -> List[Any]:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_lengths
A = use_token_type_ids
A = use_labels
A = gelu_activation
A = sinusoidal_embeddings
A = causal
A = asm
A = n_langs
A = vocab_size
A = n_special
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = summary_type
A = use_proj
A = scope
A = bos_token_id
def prepare_config_and_inputs ( self : Dict ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_input_lengths:
A = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,2 ).float()
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def get_config ( self : Optional[int] ) -> Dict:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def create_and_check_xlm_model ( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,) -> Any:
A = XLMModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,lengths=A_ ,langs=A_ )
A = model(A_ ,langs=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_xlm_lm_head ( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,) -> Dict:
A = XLMWithLMHeadModel(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_xlm_simple_qa ( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,) -> int:
A = XLMForQuestionAnsweringSimple(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
A = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def create_and_check_xlm_qa ( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,) -> List[Any]:
A = XLMForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,)
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,)
((A) , ) = result_with_labels.to_tuple()
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
((A) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def create_and_check_xlm_sequence_classif ( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,) -> Optional[int]:
A = XLMForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def create_and_check_xlm_token_classif ( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,) -> List[str]:
A = self.num_labels
A = XLMForTokenClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_xlm_for_multiple_choice ( self ,config ,input_ids ,token_type_ids ,input_lengths ,sequence_labels ,token_labels ,is_impossible_labels ,choice_labels ,input_mask ,) -> List[str]:
A = self.num_choices
A = XLMForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = model(
A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common ( self : Dict ) -> int:
    (
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ) = self.prepare_config_and_inputs()
    inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
    return config, inputs_dict
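# A minimal sketch of how XLM's multilingual inputs fit together (illustrative
# values; `langs` selects one of the `n_langs` language embeddings per token and
# `lengths` carries the true sequence lengths used for masking):
#
#   config = XLMConfig(vocab_size=99, emb_dim=32, n_layers=5, n_heads=4, n_langs=2)
#   model = XLMModel(config)
#   input_ids = torch.randint(0, 99, (2, 7))
#   langs = torch.zeros_like(input_ids)     # every token tagged as language 0
#   lengths = torch.tensor([7, 5])          # per-example true lengths
#   hidden = model(input_ids, langs=langs, lengths=lengths).last_hidden_state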
@require_torch
class XLMModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def is_pipeline_test_to_skip ( self : Any ,pipeline_test_casse_name : Optional[int] ,config_class : Union[str, Any] ,model_architecture : Union[str, Any] ,tokenizer_name : Any ,processor_name : Any ) -> Any:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _prepare_for_class ( self : int ,inputs_dict : str ,model_class : Optional[int] ,return_labels : List[Any]=False ) -> int:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
return inputs_dict
def setUp ( self : Optional[int] ) -> Optional[Any]:
A = XLMModelTester(self )
A = ConfigTester(self ,config_class=A_ ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )
def _check_attentions_for_generate ( self ,batch_size ,attentions ,min_length ,max_length ,config ,use_cache=False ,num_beam_groups=1 ) -> List[Any]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) )
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = min_length + idx + 1
A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) )
def _check_hidden_states_for_generate ( self ,batch_size ,hidden_states ,min_length ,max_length ,config ,use_cache=False ,num_beam_groups=1 ) -> Tuple:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,)
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = XLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
class XLMModelLanguageGenerationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(A_ )
A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president
expected_output_ids = [14, 447] * 10  # "the president" repeated ten times
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
output_ids = model.generate(A_ ,do_sample=False )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,expected_output_ids )
| 74 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger =logging.get_logger(__name__)
set_seed(7_70)
new_layer_name_dict ={
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
REMOTE_MODEL_PATHS ={
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
CUR_PATH =os.path.dirname(os.path.abspath(__file__))
default_cache_dir =os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR =os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path( model_type, use_small=False ):
    """simple docstring"""
    key =model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]['file_name'] )
def _download( from_hf_path, file_name ):
    """simple docstring"""
    os.makedirs(CACHE_DIR, exist_ok=True )
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR )
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_=False, lowerCAmelCase_="text" ):
"""simple docstring"""
if model_type == "text":
ModelClass =BarkSemanticModel
        ConfigClass =BarkSemanticConfig
        GenerationConfigClass =BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass =BarkCoarseModel
        ConfigClass =BarkCoarseConfig
        GenerationConfigClass =BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass =BarkFineModel
        ConfigClass =BarkFineConfig
        GenerationConfigClass =BarkFineGenerationConfig
else:
raise NotImplementedError()
model_key =F'{model_type}_small' if use_small else model_type
    model_info =REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
        _download(model_info['repo_id'], model_info['file_name'] )
    checkpoint =torch.load(ckpt_path, map_location=device )
# this is a hack
model_args =checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] =model_args['vocab_size']
        model_args['output_vocab_size'] =model_args['vocab_size']
        del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
model_args['num_heads'] =model_args.pop('n_head' )
    model_args['hidden_size'] =model_args.pop('n_embd' )
    model_args['num_layers'] =model_args.pop('n_layer' )
config =ConfigClass(**checkpoint['model_args'] )
    model =ModelClass(config=config )
    model_generation_config =GenerationConfigClass()
    model.generation_config =model_generation_config
    state_dict =checkpoint['model']
# fixup checkpoint
SCREAMING_SNAKE_CASE ='_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(snake_case__ ):
# replace part of the key with corresponding layer name in HF implementation
SCREAMING_SNAKE_CASE =k[len(snake_case__ ) :]
for old_layer_name in new_layer_name_dict:
SCREAMING_SNAKE_CASE =new_k.replace(snake_case__, new_layer_name_dict[old_layer_name] )
SCREAMING_SNAKE_CASE =state_dict.pop(snake_case__ )
extra_keys =set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys ={k for k in extra_keys if not k.endswith('.attn.bias' )}
    missing_keys =set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys ={k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(snake_case__ ) != 0:
raise ValueError(F'extra keys found: {extra_keys}' )
if len(snake_case__ ) != 0:
raise ValueError(F'missing keys: {missing_keys}' )
model.load_state_dict(state_dict, strict=False )
n_params =model.num_parameters(exclude_embeddings=True )
    val_loss =checkpoint['best_val_loss'].item()
logger.info(F'model loaded: {round(n_params/1e6, 1 )}M params, {round(snake_case__, 3 )} loss' )
model.eval()
model.to(snake_case__ )
del checkpoint, state_dict
return model
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_=False, lowerCAmelCase_="text" ):
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
SCREAMING_SNAKE_CASE ='cpu' # do conversion on cpu
SCREAMING_SNAKE_CASE =_get_ckpt_path(snake_case__, use_small=snake_case__ )
SCREAMING_SNAKE_CASE =_load_model(snake_case__, snake_case__, model_type=snake_case__, use_small=snake_case__ )
# load bark initial model
SCREAMING_SNAKE_CASE =_bark_load_model(snake_case__, 'cpu', model_type=snake_case__, use_small=snake_case__ )
if model_type == "text":
SCREAMING_SNAKE_CASE =bark_model['model']
if model.num_parameters(exclude_embeddings=snake_case__ ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
SCREAMING_SNAKE_CASE =5
SCREAMING_SNAKE_CASE =10
if model_type in ["text", "coarse"]:
SCREAMING_SNAKE_CASE =torch.randint(256, (batch_size, sequence_length), dtype=torch.int )
SCREAMING_SNAKE_CASE =bark_model(snake_case__ )[0]
SCREAMING_SNAKE_CASE =model(snake_case__ )
# take last logits
SCREAMING_SNAKE_CASE =output_new_model_total.logits[:, [-1], :]
else:
SCREAMING_SNAKE_CASE =3
SCREAMING_SNAKE_CASE =8
SCREAMING_SNAKE_CASE =torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int )
SCREAMING_SNAKE_CASE =model(snake_case__, snake_case__ )
SCREAMING_SNAKE_CASE =bark_model(snake_case__, snake_case__ )
SCREAMING_SNAKE_CASE =output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('initial and new outputs are not equal' )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
def load_whole_bark_model( semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path, ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =os.path.join(snake_case__, snake_case__ )
SCREAMING_SNAKE_CASE =BarkSemanticConfig.from_pretrained(os.path.join(snake_case__, 'config.json' ) )
SCREAMING_SNAKE_CASE =BarkCoarseConfig.from_pretrained(os.path.join(snake_case__, 'config.json' ) )
SCREAMING_SNAKE_CASE =BarkFineConfig.from_pretrained(os.path.join(snake_case__, 'config.json' ) )
SCREAMING_SNAKE_CASE =EncodecConfig.from_pretrained('facebook/encodec_24khz' )
SCREAMING_SNAKE_CASE =BarkSemanticModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE =BarkCoarseModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE =BarkFineModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE =EncodecModel.from_pretrained('facebook/encodec_24khz' )
SCREAMING_SNAKE_CASE =BarkConfig.from_sub_model_configs(
snake_case__, snake_case__, snake_case__, snake_case__ )
SCREAMING_SNAKE_CASE =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config )
SCREAMING_SNAKE_CASE =BarkModel(snake_case__ )
SCREAMING_SNAKE_CASE =semantic
SCREAMING_SNAKE_CASE =coarseAcoustic
SCREAMING_SNAKE_CASE =fineAcoustic
SCREAMING_SNAKE_CASE =codec
SCREAMING_SNAKE_CASE =bark_generation_config
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
bark.save_pretrained(snake_case__, repo_id=snake_case__, push_to_hub=snake_case__ )
if __name__ == "__main__":
parser =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    args =parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
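# Example invocations of this conversion script (illustrative; the script file
# name is an assumption, the `model_type` choices come from the help text above):
#
#   python convert_suno_to_hf.py text ./bark-text --is_small
#   python convert_suno_to_hf.py coarse ./bark-coarse
#   python convert_suno_to_hf.py fine ./bark-fine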
| 334 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowercase = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments( BenchmarkArguments ):
    '''simple docstring'''
deprecated_args = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self : int ,**A_ : Any ) -> Any:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    F'{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'
                    F' {positive_arg}={kwargs[positive_arg]}' )
A = kwargs.pop('tpu_name' ,self.tpu_name )
A = kwargs.pop('device_idx' ,self.device_idx )
A = kwargs.pop('eager_mode' ,self.eager_mode )
A = kwargs.pop('use_xla' ,self.use_xla )
super().__init__(**A_ )
tpu_name: str = field(
        default=None , metadata={'''help''': '''Name of TPU'''} , )
    device_idx: int = field(
        default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
    eager_mode: bool = field(default=False , metadata={'''help''': '''Benchmark models in eager mode.'''} )
    use_xla: bool = field(
        default=False , metadata={
            '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
        } , )
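# Construction sketch for these arguments (illustrative values; `models`,
# `batch_sizes` and `sequence_lengths` are inherited from `BenchmarkArguments`):
#
#   benchmark_args = TensorFlowBenchmarkArguments(
#       models=["bert-base-uncased"],
#       batch_sizes=[8],
#       sequence_lengths=[128],
#       eager_mode=False,
#       use_xla=True,
#   )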
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self ,['tf'] )
A = None
if self.tpu:
try:
if self.tpu_name:
A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
A = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
A = None
return tpu
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self ,['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
A = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'GPU' )
A = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([] ,'GPU' ) # disable GPU
A = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}' )
return strategy
@property
def is_tpu ( self : Optional[Any] ) -> bool:
requires_backends(self ,['tf'] )
return self._setup_tpu is not None
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> "tf.distribute.Strategy":
requires_backends(self ,['tf'] )
return self._setup_strategy
@property
def gpu_list ( self : int ) -> str:
requires_backends(self ,['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def n_gpu ( self : List[str] ) -> int:
requires_backends(self ,['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def is_gpu ( self : str ) -> bool:
return self.n_gpu > 0
| 74 | 0 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
__lowerCamelCase = logging.getLogger(__name__)
__lowerCamelCase = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
lowerCAmelCase__ : str = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
lowerCAmelCase__ : Optional[str] = dataclasses.field(
default=_lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class STDataArguments:
lowerCAmelCase__ : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
lowerCAmelCase__ : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
lowerCAmelCase__ : Optional[str] = dataclasses.field(
default=_lowercase , metadata={'help': 'A csv or a json file containing the validation data.'} )
lowerCAmelCase__ : Optional[str] = dataclasses.field(
default=_lowercase , metadata={'help': 'The name of the task to train on.'} , )
lowerCAmelCase__ : Optional[List[str]] = dataclasses.field(
default=_lowercase , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class STTrainingArguments:
lowerCAmelCase__ : str = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
lowerCAmelCase__ : Optional[str] = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
lowerCAmelCase__ : Optional[str] = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
lowerCAmelCase__ : Optional[int] = dataclasses.field(
default=10 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
lowerCAmelCase__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
lowerCAmelCase__ : Optional[bool] = dataclasses.field(
default=_lowercase , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
lowerCAmelCase__ : Optional[bool] = dataclasses.field(
default=_lowercase , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
lowerCAmelCase__ : Optional[bool] = dataclasses.field(
default=_lowercase , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
lowerCAmelCase__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
lowerCAmelCase__ : Optional[int] = dataclasses.field(
default=100 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
lowerCAmelCase__ : Optional[int] = dataclasses.field(
default=_lowercase , metadata={'help': 'Random seed for initialization.'} , )
def create_pseudo_labeled_data ( args , infer_input , infer_output , eval_result , id2label , next_data_dir ):
"""simple docstring"""
A__ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
A__ = dataset.filter(lambda UpperCamelCase__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
A__ = int(eval_result * len(snake_case__ ) )
print(snake_case__ )
A__ = dataset.sort('probability' , reverse=True )
A__ = dataset.select(range(snake_case__ ) )
A__ = dataset.remove_columns(['label', 'probability'] )
A__ = dataset.rename_column('prediction' , 'label' )
A__ = dataset.map(lambda UpperCamelCase__ : {"label": idalabel[example["label"]]} )
A__ = dataset.shuffle(seed=args.seed )
A__ = os.path.join(snake_case__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(snake_case__ , index=snake_case__ )
else:
dataset.to_json(snake_case__ )
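# Toy illustration of the confidence filtering above (a sketch, not executed here):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"prediction": ["pos", "neg"], "probability": [0.95, 0.40]})
#   ds = ds.filter(lambda ex: ex["probability"] > 0.5)     # keeps only the first row
#   ds = ds.remove_columns(["probability"]).rename_column("prediction", "label")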
def selftrain ( model_name_or_path , train_file , infer_file , output_dir , **kwargs ):
"""simple docstring"""
A__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
A__ = STModelArguments(model_name_or_path=snake_case__ )
A__ = STDataArguments(train_file=snake_case__ , infer_file=snake_case__ )
A__ = STTrainingArguments(output_dir=snake_case__ )
A__ = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(snake_case__ ).items():
setattr(snake_case__ , snake_case__ , snake_case__ )
for key, value in kwargs.items():
if hasattr(snake_case__ , snake_case__ ):
setattr(snake_case__ , snake_case__ , snake_case__ )
# Sanity checks
A__ = {}
A__ = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
A__ = args.train_file
A__ = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
A__ = args.eval_file
for key in data_files:
A__ = data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
A__ = extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
A__ = F'''{args.output_dir}/self-train_iter-{{}}'''.format
A__ = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=snake_case__ )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
accelerator.wait_for_everyone()
A__ = None
A__ = None
A__ = 0
A__ = False
# Show the progress bar
A__ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
A__ = data_dir_format(snake_case__ )
assert os.path.exists(snake_case__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
A__ = os.path.join(snake_case__ , 'stage-1' )
A__ = {
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(snake_case__ , snake_case__ ):
arguments_dict.update({key: value} )
A__ = os.path.join(snake_case__ , 'best-checkpoint' , snake_case__ )
if os.path.exists(snake_case__ ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , snake_case__ , snake_case__ , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , snake_case__ )
finetune(**snake_case__ )
accelerator.wait_for_everyone()
assert os.path.exists(snake_case__ )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , snake_case__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
A__ = os.path.join(snake_case__ , 'best-checkpoint' )
A__ = os.path.join(snake_case__ , 'stage-2' )
# Update arguments_dict
A__ = model_path
A__ = data_files['train']
A__ = current_output_dir
A__ = os.path.join(snake_case__ , 'best-checkpoint' , snake_case__ )
if os.path.exists(snake_case__ ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , snake_case__ , snake_case__ , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , snake_case__ )
finetune(**snake_case__ )
accelerator.wait_for_everyone()
assert os.path.exists(snake_case__ )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , snake_case__ )
A__ = iteration
A__ = data_dir_format(iteration + 1 )
A__ = AutoConfig.from_pretrained(os.path.join(snake_case__ , 'best-checkpoint' ) )
A__ = config.id2label
A__ = os.path.join(snake_case__ , 'eval_results_best-checkpoint.json' )
A__ = os.path.join(snake_case__ , 'test_results_best-checkpoint.json' )
assert os.path.exists(snake_case__ )
with open(snake_case__ , 'r' ) as f:
A__ = float(json.load(snake_case__ )[args.eval_metric] )
A__ = os.path.join(snake_case__ , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(snake_case__ )
# Loading the dataset from local csv or json files.
A__ = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
A__ = load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(snake_case__ , exist_ok=snake_case__ )
shutil.copy(snake_case__ , os.path.join(snake_case__ , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(snake_case__ ):
shutil.copy(snake_case__ , os.path.join(snake_case__ , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
accelerator.wait_for_everyone()
A__ = os.path.join(snake_case__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
A__ = eval_result
if best_iteration is None:
A__ = new_iteration
A__ = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
A__ = new_iteration
A__ = new_eval_result
A__ = 0
else:
if new_eval_result == best_eval_result:
A__ = new_iteration
A__ = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
A__ = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , snake_case__ )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , snake_case__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(snake_case__ , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(snake_case__ , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , snake_case__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(snake_case__ , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(snake_case__ , 'eval_results_best-iteration.json' ) , )
| 221 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''dpt'''
def __init__( self : str ,A_ : Tuple=768 ,A_ : int=12 ,A_ : Optional[int]=12 ,A_ : Optional[int]=3072 ,A_ : List[str]="gelu" ,A_ : str=0.0 ,A_ : int=0.0 ,A_ : str=0.02 ,A_ : str=1e-12 ,A_ : str=384 ,A_ : Dict=16 ,A_ : Union[str, Any]=3 ,A_ : Dict=False ,A_ : Any=True ,A_ : Optional[int]=[2, 5, 8, 11] ,A_ : Optional[Any]="project" ,A_ : Tuple=[4, 2, 1, 0.5] ,A_ : int=[96, 192, 384, 768] ,A_ : int=256 ,A_ : str=-1 ,A_ : Optional[int]=False ,A_ : Optional[int]=True ,A_ : Union[str, Any]=0.4 ,A_ : Union[str, Any]=255 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=[1, 1024, 24, 24] ,A_ : List[str]=[0, 1] ,A_ : List[Any]=None ,**A_ : Tuple ,) -> Union[str, Any]:
super().__init__(**A_ )
A = hidden_size
A = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
A = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
A = BitConfig(**A_ )
elif isinstance(A_ ,A_ ):
logger.info('Initializing the config with a `BiT` backbone.' )
A = BitConfig(**A_ )
elif isinstance(A_ ,A_ ):
A = backbone_config
else:
raise ValueError(
F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
A = backbone_featmap_shape
A = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
A = None
A = None
A = []
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
A = readout_type
A = reassemble_factors
A = neck_hidden_sizes
A = fusion_hidden_size
A = head_in_index
A = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
A = use_auxiliary_head
A = auxiliary_loss_weight
A = semantic_loss_ignore_index
A = semantic_classifier_dropout
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
A = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
A = self.backbone_config.to_dict()
A = self.__class__.model_type
        return output
| 74 | 0 |
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, rounded to digit_amount digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 102 |
"""simple docstring"""
from __future__ import annotations
import math
_lowercase = '''2020.9.26'''
_lowercase = '''xcodz-dot, cclaus, dhruvmanila'''
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Project a 3D point onto a 2D plane with a simple perspective divide."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point about the given axis ('x', 'y' or 'z')."""
    if not isinstance(axis, str):
        raise TypeError('Axis must be a str')
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            'Input values except axis must either be float or int: '
            f'{list(input_variables.values())}'
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
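    # A minimal composition sketch (illustrative, not part of the original file):
    # rotate a point about the z-axis, then perspective-project it to 2D. Note that
    # rotate() applies its own angle normalization before calling the trig functions.
    rx, ry, rz = rotate(1.0, 2.0, 3.0, 'z', 90.0)
    print(F"""{convert_to_2d(rx, ry, rz, 10.0, 10.0) = }""")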
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""") | 74 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase=False , __lowercase=False , __lowercase=2 , __lowercase=99 , __lowercase=0 , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=2 , __lowercase=0.02 , __lowercase=2 , __lowercase=4 , __lowercase="last" , __lowercase=True , __lowercase=None , __lowercase=0 , ) -> List[Any]:
__UpperCamelCase :str = parent
__UpperCamelCase :Union[str, Any] = batch_size
__UpperCamelCase :Any = seq_length
__UpperCamelCase :str = is_training
__UpperCamelCase :Any = use_input_lengths
__UpperCamelCase :int = use_token_type_ids
__UpperCamelCase :Dict = use_labels
__UpperCamelCase :str = gelu_activation
__UpperCamelCase :Any = sinusoidal_embeddings
__UpperCamelCase :Dict = causal
__UpperCamelCase :Optional[int] = asm
__UpperCamelCase :Optional[int] = n_langs
__UpperCamelCase :List[str] = vocab_size
__UpperCamelCase :int = n_special
__UpperCamelCase :List[Any] = hidden_size
__UpperCamelCase :int = num_hidden_layers
__UpperCamelCase :List[Any] = num_attention_heads
__UpperCamelCase :int = hidden_dropout_prob
__UpperCamelCase :Dict = attention_probs_dropout_prob
__UpperCamelCase :Optional[Any] = max_position_embeddings
__UpperCamelCase :List[str] = type_sequence_label_size
__UpperCamelCase :Dict = initializer_range
__UpperCamelCase :Any = num_labels
__UpperCamelCase :Tuple = num_choices
__UpperCamelCase :Any = summary_type
__UpperCamelCase :Dict = use_proj
__UpperCamelCase :Union[str, Any] = scope
__UpperCamelCase :List[str] = bos_token_id
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCamelCase :int = random_attention_mask([self.batch_size, self.seq_length])
__UpperCamelCase :int = None
if self.use_input_lengths:
__UpperCamelCase :List[str] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase :Optional[Any] = None
if self.use_token_type_ids:
__UpperCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
__UpperCamelCase :Tuple = None
__UpperCamelCase :Optional[int] = None
__UpperCamelCase :Any = None
if self.use_labels:
__UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCamelCase :Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size] , 2).float()
__UpperCamelCase :Optional[int] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCamelCase :List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase__ ( self) -> Dict:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Any:
__UpperCamelCase :str = XLMModel(config=A_)
model.to(A_)
model.eval()
__UpperCamelCase :Optional[int] = model(A_ , lengths=A_ , langs=A_)
__UpperCamelCase :List[str] = model(A_ , langs=A_)
__UpperCamelCase :List[Any] = model(A_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict:
__UpperCamelCase :int = XLMWithLMHeadModel(A_)
model.to(A_)
model.eval()
__UpperCamelCase :Optional[Any] = model(A_ , token_type_ids=A_ , labels=A_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> int:
__UpperCamelCase :Tuple = XLMForQuestionAnsweringSimple(A_)
model.to(A_)
model.eval()
__UpperCamelCase :Tuple = model(A_)
__UpperCamelCase :Any = model(A_ , start_positions=A_ , end_positions=A_)
__UpperCamelCase :str = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> List[Any]:
__UpperCamelCase :Dict = XLMForQuestionAnswering(A_)
model.to(A_)
model.eval()
__UpperCamelCase :Optional[Any] = model(A_)
__UpperCamelCase :int = model(
A_ , start_positions=A_ , end_positions=A_ , cls_index=A_ , is_impossible=A_ , p_mask=A_ , )
__UpperCamelCase :List[Any] = model(
A_ , start_positions=A_ , end_positions=A_ , cls_index=A_ , is_impossible=A_ , )
        (total_loss,) = result_with_labels.to_tuple()
        __UpperCamelCase :Dict = model(A_ , start_positions=A_ , end_positions=A_)
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Optional[int]:
__UpperCamelCase :Dict = XLMForSequenceClassification(A_)
model.to(A_)
model.eval()
__UpperCamelCase :List[str] = model(A_)
__UpperCamelCase :List[Any] = model(A_ , labels=A_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> List[str]:
__UpperCamelCase :Optional[int] = self.num_labels
__UpperCamelCase :Optional[int] = XLMForTokenClassification(A_)
model.to(A_)
model.eval()
__UpperCamelCase :Any = model(A_ , attention_mask=A_ , labels=A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> List[str]:
__UpperCamelCase :Dict = self.num_choices
__UpperCamelCase :Optional[int] = XLMForMultipleChoice(config=A_)
model.to(A_)
model.eval()
__UpperCamelCase :str = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :Optional[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :Any = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :Tuple = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCamelCase__ ( self) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
a__ : Union[str, Any] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
a__ : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
a__ : Optional[int] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Any:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase=False) -> int:
__UpperCamelCase :Any = super()._prepare_for_class(A_ , A_ , return_labels=A_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__UpperCamelCase :List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_)
__UpperCamelCase :List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_)
return inputs_dict
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Optional[int] = XLMModelTester(self)
__UpperCamelCase :List[str] = ConfigTester(self , config_class=A_ , emb_dim=37)
def UpperCamelCase__ ( self) -> str:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_)
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=False , __lowercase=1) -> List[Any]:
self.assertIsInstance(A_ , A_)
self.assertListEqual(
[isinstance(A_ , A_) for iter_attentions in attentions] , [True] * len(A_))
self.assertEqual(len(A_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(A_):
# adds PAD dummy token
__UpperCamelCase :str = min_length + idx + 1
__UpperCamelCase :str = min_length + idx + 1
__UpperCamelCase :Dict = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(A_))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=False , __lowercase=1) -> Tuple:
self.assertIsInstance(A_ , A_)
self.assertListEqual(
[isinstance(A_ , A_) for iter_hidden_states in hidden_states] , [True] * len(A_) , )
self.assertEqual(len(A_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(A_):
# adds PAD dummy token
__UpperCamelCase :str = min_length + idx + 1
__UpperCamelCase :List[str] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(A_) , )
pass
@slow
def UpperCamelCase__ ( self) -> Optional[Any]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase :Dict = XLMModel.from_pretrained(A_)
self.assertIsNotNone(A_)
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Any = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''')
model.to(A_)
__UpperCamelCase :int = torch.tensor([[14, 447]] , dtype=torch.long , device=A_) # the president
__UpperCamelCase :Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__UpperCamelCase :List[Any] = model.generate(A_ , do_sample=A_)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , A_)
| 43 |
"""simple docstring"""
class CircularQueue:
    """Fixed-size circular queue backed by a Python list (ring buffer)."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        # Return the front element, or False when the queue is empty.
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
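
# A short usage sketch (illustrative, not part of the original file):
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(1).enqueue(2).enqueue(3)  # enqueue() returns self, so calls chain
    print(queue.dequeue())  # 1
    queue.enqueue(4)  # wraps around inside the fixed-size buffer
    print(len(queue), queue.first())  # 3 2
| 74 | 0 |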
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(_lowercase)
class __magic_name__ ( _lowercase):
def __init__( self : Tuple , **lowerCamelCase__ : str ) -> str:
'''simple docstring'''
super().__init__(**A_ )
requires_backends(self , '''vision''' )
requires_backends(self , '''torch''' )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(A_ )
def UpperCAmelCase__ ( self : Tuple , **lowerCamelCase__ : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCamelCase__ : int = {}
UpperCamelCase__ : Any = {}
UpperCamelCase__ : Any = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCamelCase__ : int = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
UpperCamelCase__ : List[str] = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
UpperCamelCase__ : Optional[int] = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
UpperCamelCase__ : List[Any] = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
UpperCamelCase__ : Union[str, Any] = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCamelCase__ : Optional[Any] = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
UpperCamelCase__ : Tuple = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
UpperCamelCase__ : Union[str, Any] = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
UpperCamelCase__ : Union[str, Any] = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
UpperCamelCase__ : int = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
UpperCamelCase__ : Union[str, Any] = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
UpperCamelCase__ : Optional[int] = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Union[str, Any] , lowerCamelCase__ : str , *lowerCamelCase__ : Dict , lowerCamelCase__ : int=None , lowerCamelCase__ : Optional[int]=None , **lowerCamelCase__ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return super().__call__(A_ , *A_ , num_workers=A_ , batch_size=A_ , **A_ )
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str]=64 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : float = 512 / 1500 , lowerCamelCase__ : Optional[int] = 32 , lowerCamelCase__ : Optional[int] = 1 , ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Dict = load_image(A_ )
UpperCamelCase__ : Union[str, Any] = self.image_processor.size['''longest_edge''']
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
A_ , A_ , A_ , A_ , A_ , A_ )
UpperCamelCase__ : List[str] = self.image_processor(images=A_ , return_tensors='''pt''' )
with self.device_placement():
if self.framework == "pt":
UpperCamelCase__ : Dict = self.get_inference_context()
with inference_context():
UpperCamelCase__ : Dict = self._ensure_tensor_on_device(A_ , device=self.device )
UpperCamelCase__ : Union[str, Any] = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''' ) )
UpperCamelCase__ : Optional[int] = image_embeddings
UpperCamelCase__ : Any = grid_points.shape[1]
UpperCamelCase__ : List[str] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
'''To return all points at once, set points_per_batch to None''' )
for i in range(0 , A_ , A_ ):
UpperCamelCase__ : Union[str, Any] = grid_points[:, i : i + points_per_batch, :, :]
UpperCamelCase__ : Any = input_labels[:, i : i + points_per_batch]
UpperCamelCase__ : List[str] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any]=0.88 , lowerCamelCase__ : Dict=0.95 , lowerCamelCase__ : Optional[int]=0 , lowerCamelCase__ : Union[str, Any]=1 , ) -> str:
'''simple docstring'''
UpperCamelCase__ : List[Any] = model_inputs.pop('''input_boxes''' )
UpperCamelCase__ : Any = model_inputs.pop('''is_last''' )
UpperCamelCase__ : Tuple = model_inputs.pop('''original_sizes''' ).tolist()
UpperCamelCase__ : List[str] = model_inputs.pop('''reshaped_input_sizes''' ).tolist()
UpperCamelCase__ : Union[str, Any] = self.model(**A_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
UpperCamelCase__ : List[Any] = model_outputs['''pred_masks''']
UpperCamelCase__ : List[str] = self.image_processor.post_process_masks(
A_ , A_ , A_ , A_ , binarize=A_ )
UpperCamelCase__ : str = model_outputs['''iou_scores''']
        masks, iou_scores, boxes = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , A_ , A_ , A_ , A_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : int=False , lowerCamelCase__ : int=0.7 , ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : Tuple = []
for model_output in model_outputs:
all_scores.append(model_output.pop('''iou_scores''' ) )
all_masks.extend(model_output.pop('''masks''' ) )
all_boxes.append(model_output.pop('''boxes''' ) )
UpperCamelCase__ : str = torch.cat(A_ )
UpperCamelCase__ : List[str] = torch.cat(A_ )
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
A_ , A_ , A_ , A_ )
UpperCamelCase__ : Union[str, Any] = defaultdict(A_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(A_ )
UpperCamelCase__ : Optional[Any] = {}
if output_rle_mask:
UpperCamelCase__ : Optional[int] = rle_mask
if output_bboxes_mask:
UpperCamelCase__ : int = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 146 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*A_ : List[str] ,**A_ : int ) -> None:
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' ,A_ ,)
        super().__init__(*A_ ,**A_ )
| 74 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = '''deit'''
def __init__( self : int , _UpperCAmelCase : Optional[Any]=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : int=30_72 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Tuple=1E-12 , _UpperCAmelCase : Union[str, Any]=2_24 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[int]=16 , **_UpperCAmelCase : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**A_ )
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = encoder_stride
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
lowerCAmelCase_ : int = version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
return 1E-4
| 346 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''gpt_bigcode'''
_lowerCamelCase: List[Any] = ['''past_key_values''']
_lowerCamelCase: int = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[int] ,A_ : Dict=5_0257 ,A_ : Union[str, Any]=1024 ,A_ : str=768 ,A_ : Any=12 ,A_ : Any=12 ,A_ : Optional[int]=None ,A_ : Any="gelu_pytorch_tanh" ,A_ : List[str]=0.1 ,A_ : Optional[int]=0.1 ,A_ : List[str]=0.1 ,A_ : Tuple=1e-5 ,A_ : Optional[int]=0.02 ,A_ : List[str]=True ,A_ : Optional[Any]=True ,A_ : List[Any]=5_0256 ,A_ : Union[str, Any]=5_0256 ,A_ : int=True ,A_ : Optional[Any]=True ,A_ : Dict=True ,**A_ : Union[str, Any] ,) -> Union[str, Any]:
A = vocab_size
A = n_positions
A = n_embd
A = n_layer
A = n_head
A = n_inner
A = activation_function
A = resid_pdrop
A = embd_pdrop
A = attn_pdrop
A = layer_norm_epsilon
A = initializer_range
A = scale_attn_weights
A = use_cache
A = attention_softmax_in_fpaa
A = scale_attention_softmax_in_fpaa
A = multi_query
A = bos_token_id
A = eos_token_id
        super().__init__(bos_token_id=A_ ,eos_token_id=A_ ,**A_ )
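# A minimal usage sketch (illustrative; upstream this class is GPTBigCodeConfig):
# config = GPTBigCodeConfig(multi_query=True)
# config.hidden_size  # resolves to n_embd (768 by default) via the attribute_map aliases
| 74 | 0 |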
"""simple docstring"""
import argparse
import json
import subprocess
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = []
__A = (
f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
__A = subprocess.run(snake_case__ , shell=snake_case__ , stdout=subprocess.PIPE )
__A = output.stdout.decode('''utf-8''' )
__A = json.loads(snake_case__ )
__A = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(snake_case__ )
# save the result so we can report them on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(snake_case__ ) )
if len(snake_case__ ) > 0:
__A = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(f'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
return values.split(''',''' )
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
lowercase_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
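# Example invocation (illustrative; the script name and runner names are assumptions):
#   python get_runner_status.py --target_runners runner-1,runner-2 --token "$GH_TOKEN"
# Offline runners, if any, are written to offline_runners.txt before the error is raised.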
| 266 |
"""simple docstring"""
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_lowercase = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
_lowercase = '''sshleifer/student_marian_en_ro_6_1'''
_lowercase = '''sshleifer/tiny-mbart'''
@require_torch
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any]=False ,A_ : Optional[int]=None ,A_ : List[str]=True ,A_ : Tuple=True ,A_ : Union[str, Any]=True ,A_ : List[str]=True ,) -> Tuple:
A = self.run_trainer(
eval_steps=1 ,max_len=12 ,model_name=A_ ,num_train_epochs=1 ,distributed=A_ ,extra_args_str=A_ ,predict_with_generate=A_ ,do_train=A_ ,do_eval=A_ ,do_predict=A_ ,)
A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history
if not do_eval:
return
A = [log for log in logs if 'eval_loss' in log.keys()]
A = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
A = eval_metrics[-1]
assert isinstance(last_step_stats['eval_bleu'] ,A_ )
assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
self.run_seqaseq_quick(distributed=A_ )
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
self.run_seqaseq_quick(distributed=A_ )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple --fp16' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2' ,predict_with_generate=A_ )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
self.run_seqaseq_quick(
distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2 --fp16' ,predict_with_generate=A_ )
@require_apex
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' )
@parameterized.expand(['base', 'low', 'high', 'mixed'] )
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Dict ) -> List[str]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
A = {
# test with the default log_level - should be info and thus log info once
'base': {'extra_args_str': '', 'n_matches': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
}
A = experiments[experiment_id]
A = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
A = 'Running training'
with CaptureStderr() as cl:
self.run_seqaseq_quick(**A_ ,extra_args_str=data['extra_args_str'] )
A = len(re.findall(A_ ,cl.err ) )
self.assertEqual(A_ ,data['n_matches'] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
A = self.run_trainer(
eval_steps=2 ,max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=10 ,distributed=A_ ,)
# Check metrics
A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history
A = [log for log in logs if 'eval_loss' in log.keys()]
A = eval_metrics[0]
A = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['eval_bleu'] ,A_ )
# test if do_predict saves generations and metrics
A = os.listdir(A_ )
A = {os.path.basename(A_ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(A_ : str ) -> Tuple[int, float]:
A = '--skip_memory_metrics 0'
A = self.run_trainer(
max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=1 ,optim=A_ ,distributed=A_ ,extra_args_str=A_ ,do_eval=A_ ,do_predict=A_ ,n_gpus_to_use=1 ,)
# Check metrics
A = TrainerState.load_from_json(Path(A_ ,'trainer_state.json' ) ).log_history
A = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 )
A = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 )
A = logs[0]['train_loss']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
A = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
A_ ,A_ ,'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' ,)
self.assertGreater(
A_ ,A_ ,'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' ,)
self.assertEqual(
A_ ,A_ ,F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ,A_ : str ,A_ : int ,A_ : float = 3e-3 ,A_ : str = "adafactor" ,A_ : bool = False ,A_ : str = None ,A_ : int = 0 ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : int = None ,) -> Dict:
A = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
A = self.get_auto_remove_tmp_dir()
A = F'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split()
A = F'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A_ )}\n '.split()
A = '\n --do_predict\n '.split()
A = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'--optim {optim}'.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
A = get_gpu_count()
A = get_torch_dist_unique_port()
A = F'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split()
A = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A_ ,env=self.get_env() )
else:
A = ['run_translation.py'] + args
with patch.object(A_ ,'argv' ,A_ ):
main()
        return output_dir
| 74 | 0 |
import os
import sys
import unittest
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowerCAmelCase__ = os.path.join(git_repo_path, """src""", """diffusers""")
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = find_backend(" if not is_torch_available():" )
self.assertEqual(A_ , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
A__ = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(A_ , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
A__ = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(A_ , "torch_and_transformers_and_onnx" )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , A_ )
self.assertIn("torch_and_transformers" , A_ )
self.assertIn("flax_and_transformers" , A_ )
self.assertIn("torch_and_transformers_and_onnx" , A_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = create_dummy_object("CONSTANT" , "\'torch\'" )
self.assertEqual(A_ , "\nCONSTANT = None\n" )
A__ = create_dummy_object("function" , "\'torch\'" )
self.assertEqual(
A_ , "\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n" )
A__ = "\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n"
A__ = create_dummy_object("FakeClass" , "\'torch\'" )
self.assertEqual(A_ , A_ )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
A__ = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , A_ )
| 68 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''deit'''
def __init__( self : int ,A_ : Optional[Any]=768 ,A_ : Union[str, Any]=12 ,A_ : Dict=12 ,A_ : int=3072 ,A_ : Optional[Any]="gelu" ,A_ : Dict=0.0 ,A_ : Any=0.0 ,A_ : str=0.02 ,A_ : Tuple=1e-12 ,A_ : Union[str, Any]=224 ,A_ : Optional[Any]=16 ,A_ : List[Any]=3 ,A_ : Optional[Any]=True ,A_ : Optional[int]=16 ,**A_ : Union[str, Any] ,) -> Dict:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = encoder_stride
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: int = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> float:
        return 1e-4
| 74 | 0 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
lowerCAmelCase = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCAmelCase_ ( ) ->List[Any]:
lowerCamelCase__ : List[str] ='https://pypi.org/pypi/diffusers/json'
lowerCamelCase__ : List[Any] =json.loads(request.urlopen(snake_case__ ).read() )['releases'].keys()
return sorted(snake_case__ , key=lambda snake_case_ : version.Version(snake_case__ ) )
def lowerCAmelCase_ ( ) ->Union[str, Any]:
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(snake_case__ )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowerCamelCase__ : List[Any] =Path(snake_case__ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def lowerCAmelCase_ ( snake_case_ : Union[str, os.PathLike] ) ->Union[str, Any]:
init_hf_modules()
lowerCamelCase__ : List[Any] =Path(snake_case__ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowerCamelCase__ : Optional[Any] =dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) ->Any:
with open(snake_case__ , 'r' , encoding='utf-8' ) as f:
lowerCamelCase__ : int =f.read()
# Imports of the form `import .xxx`
lowerCamelCase__ : int =re.findall('^\s*import\s+\.(\S+)\s*$' , snake_case__ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('^\s*from\s+\.(\S+)\s+import' , snake_case__ , flags=re.MULTILINE )
# Unique-ify
return list(set(snake_case__ ) )
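# Example (illustrative; upstream this helper is named get_relative_imports): for a
# file containing "from .custom_scheduler import X" and "from .utils import Y", it
# returns ['custom_scheduler', 'utils'] (order not guaranteed, since the matches are
# de-duplicated through a set).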
def lowerCAmelCase_ ( snake_case_ : str ) ->Optional[int]:
lowerCamelCase__ : Optional[int] =False
lowerCamelCase__ : int =[module_file]
lowerCamelCase__ : List[str] =[]
# Let's recurse through all relative imports
while not no_change:
lowerCamelCase__ : int =[]
for f in files_to_check:
new_imports.extend(get_relative_imports(snake_case__ ) )
lowerCamelCase__ : Dict =Path(snake_case__ ).parent
lowerCamelCase__ : int =[str(module_path / m ) for m in new_imports]
lowerCamelCase__ : Union[str, Any] =[f for f in new_import_files if f not in all_relative_imports]
lowerCamelCase__ : Union[str, Any] =[f"""{f}.py""" for f in new_import_files]
lowerCamelCase__ : Any =len(snake_case__ ) == 0
all_relative_imports.extend(snake_case__ )
return all_relative_imports
def lowerCAmelCase_ ( snake_case_ : int ) ->str:
with open(snake_case__ , 'r' , encoding='utf-8' ) as f:
lowerCamelCase__ : Optional[int] =f.read()
# Imports of the form `import xxx`
lowerCamelCase__ : List[Any] =re.findall('^\s*import\s+(\S+)\s*$' , snake_case__ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall('^\s*from\s+(\S+)\s+import' , snake_case__ , flags=re.MULTILINE )
# Only keep the top-level module
lowerCamelCase__ : Dict =[imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
lowerCamelCase__ : List[str] =list(set(snake_case__ ) )
lowerCamelCase__ : Union[str, Any] =[]
for imp in imports:
try:
importlib.import_module(snake_case__ )
except ImportError:
missing_packages.append(snake_case__ )
if len(snake_case__ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
f"""{', '.join(snake_case__ )}. Run `pip install {' '.join(snake_case__ )}`""" )
return get_relative_imports(snake_case__ )
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[Any] ) ->Dict:
lowerCamelCase__ : str =module_path.replace(os.path.sep , '.' )
lowerCamelCase__ : Optional[int] =importlib.import_module(snake_case__ )
if class_name is None:
return find_pipeline_class(snake_case__ )
return getattr(snake_case__ , snake_case__ )
def lowerCAmelCase_ ( snake_case_ : List[str] ) ->Union[str, Any]:
from ..pipelines import DiffusionPipeline
lowerCamelCase__ : str =dict(inspect.getmembers(snake_case__ , inspect.isclass ) )
lowerCamelCase__ : Optional[int] =None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , snake_case__ )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
f""" {loaded_module}.""" )
lowerCamelCase__ : Tuple =cls
return pipeline_class
def lowerCAmelCase_ ( snake_case_ : Union[str, os.PathLike] , snake_case_ : str , snake_case_ : Optional[Union[str, os.PathLike]] = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : Optional[Dict[str, str]] = None , snake_case_ : Optional[Union[bool, str]] = None , snake_case_ : Optional[str] = None , snake_case_ : bool = False , ) ->List[Any]:
lowerCamelCase__ : Dict =str(snake_case__ )
lowerCamelCase__ : Dict =os.path.join(snake_case__ , snake_case__ )
if os.path.isfile(snake_case__ ):
lowerCamelCase__ : str =module_file_or_url
lowerCamelCase__ : List[str] ='local'
elif pretrained_model_name_or_path.count('/' ) == 0:
lowerCamelCase__ : Union[str, Any] =get_diffusers_versions()
# cut ".dev0"
lowerCamelCase__ : Optional[int] ='v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
lowerCamelCase__ : Optional[Any] =latest_version if latest_version[1:] in available_versions else 'main'
logger.info(f"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
lowerCamelCase__ : Tuple =f"""v{revision}"""
elif revision == "main":
lowerCamelCase__ : List[str] =revision
else:
raise ValueError(
f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
f""" {', '.join(available_versions + ['main'] )}.""" )
# community pipeline on GitHub
lowerCamelCase__ : Optional[int] =COMMUNITY_PIPELINES_URL.format(revision=snake_case__ , pipeline=snake_case__ )
try:
lowerCamelCase__ : Optional[Any] =cached_download(
snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , use_auth_token=snake_case__ , )
lowerCamelCase__ : int ='git'
lowerCamelCase__ : Optional[Any] =pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
lowerCamelCase__ : Tuple =hf_hub_download(
snake_case__ , snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , use_auth_token=snake_case__ , )
lowerCamelCase__ : Tuple =os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
lowerCamelCase__ : Union[str, Any] =check_imports(snake_case__ )
# Now we move the module inside our cached dynamic modules.
lowerCamelCase__ : Dict =DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(snake_case__ )
lowerCamelCase__ : Tuple =Path(snake_case__ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(snake_case__ , submodule_path / module_file )
for module_needed in modules_needed:
lowerCamelCase__ : List[str] =f"""{module_needed}.py"""
shutil.copy(os.path.join(snake_case__ , snake_case__ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(snake_case__ , snake_case__ ):
lowerCamelCase__ : Dict =use_auth_token
elif use_auth_token is True:
lowerCamelCase__ : str =HfFolder.get_token()
else:
lowerCamelCase__ : Tuple =None
lowerCamelCase__ : Optional[int] =model_info(snake_case__ , revision=snake_case__ , token=snake_case__ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowerCamelCase__ : Dict =submodule_path / commit_hash
lowerCamelCase__ : Optional[Any] =full_submodule + os.path.sep + commit_hash
create_dynamic_module(snake_case__ )
if not (submodule_path / module_file).exists():
shutil.copy(snake_case__ , submodule_path / module_file )
        # Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
snake_case__ , f"""{module_needed}.py""" , cache_dir=snake_case__ , force_download=snake_case__ , resume_download=snake_case__ , proxies=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , local_files_only=snake_case__ , )
return os.path.join(snake_case__ , snake_case__ )
def lowerCAmelCase_ ( snake_case_ : Union[str, os.PathLike] , snake_case_ : str , snake_case_ : Optional[str] = None , snake_case_ : Optional[Union[str, os.PathLike]] = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : Optional[Dict[str, str]] = None , snake_case_ : Optional[Union[bool, str]] = None , snake_case_ : Optional[str] = None , snake_case_ : bool = False , **snake_case_ : List[Any] , ) ->Optional[Any]:
lowerCamelCase__ : Dict =get_cached_module_file(
snake_case__ , snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , resume_download=snake_case__ , proxies=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , local_files_only=snake_case__ , )
return get_class_in_module(snake_case__ , final_module.replace('.py' , '' ) ) | 126 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[int]=0.999 , snake_case__ : Union[str, Any]="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ : Dict ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
A = []
for i in range(snake_case__ ):
A = i / num_diffusion_timesteps
A = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) )
return torch.tensor(snake_case__ , dtype=torch.floataa )
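# Added: a numerically equivalent sketch of the cosine ("squaredcos_cap_v2") schedule
# built by the helper above, assuming the standard alpha-bar construction
# (Nichol & Dhariwal, 2021). Standalone; the function name is illustrative.
def _cosine_betas(num_diffusion_timesteps, max_beta=0.999):
    import math
    import torch
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
# _cosine_betas(1000) starts tiny and grows toward max_beta at the end of the schedule.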
class lowerCAmelCase_ ( _lowercase , _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
_lowerCamelCase: Optional[Any] = 2
@register_to_config
def __init__( self : str ,A_ : int = 1000 ,A_ : float = 0.0_00_85 ,A_ : float = 0.0_12 ,A_ : str = "linear" ,A_ : Optional[Union[np.ndarray, List[float]]] = None ,A_ : str = "epsilon" ,A_ : Optional[bool] = False ,A_ : Optional[bool] = False ,A_ : float = 1.0 ,A_ : str = "linspace" ,A_ : int = 0 ,) -> List[str]:
if trained_betas is not None:
A = torch.tensor(A_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
A = torch.linspace(A_ ,A_ ,A_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,A_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A = betas_for_alpha_bar(A_ ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
A = betas_for_alpha_bar(A_ ,alpha_transform_type='exp' )
else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}' )
A = 1.0 - self.betas
A = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(A_ ,A_ ,A_ )
A = use_karras_sigmas
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : Tuple=None ) -> Tuple:
if schedule_timesteps is None:
A = self.timesteps
A = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A = 1 if len(A_ ) > 1 else 0
else:
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
A = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : torch.FloatTensor ,A_ : Union[float, torch.FloatTensor] ,) -> torch.FloatTensor:
A = self.index_for_timestep(A_ )
A = self.sigmas[step_index]
A = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : Union[str, torch.device] = None ,A_ : Optional[int] = None ,) -> Optional[Any]:
A = num_inference_steps
A = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A = np.linspace(0 ,num_train_timesteps - 1 ,A_ ,dtype=A_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(0 ,A_ ) * step_ratio).round()[::-1].copy().astype(A_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(A_ ,0 ,-step_ratio )).round().copy().astype(A_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
A = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A = np.log(A_ )
A = np.interp(A_ ,np.arange(0 ,len(A_ ) ) ,A_ )
if self.config.use_karras_sigmas:
A = self._convert_to_karras(in_sigmas=A_ ,num_inference_steps=self.num_inference_steps )
A = np.array([self._sigma_to_t(A_ ,A_ ) for sigma in sigmas] )
A = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
A = torch.from_numpy(A_ ).to(device=A_ )
A = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A = torch.from_numpy(A_ )
A = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(A_ ).startswith('mps' ):
# mps does not support float64
A = timesteps.to(A_ ,dtype=torch.floataa )
else:
A = timesteps.to(device=A_ )
# empty dt and derivative
A = None
A = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A = defaultdict(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : List[str] ) -> Dict:
# get log sigma
A = np.log(A_ )
# get distribution
A = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A = low_idx + 1
A = log_sigmas[low_idx]
A = log_sigmas[high_idx]
# interpolate sigmas
A = (low - log_sigma) / (low - high)
A = np.clip(A_ ,0 ,1 )
# transform interpolation to time range
A = (1 - w) * low_idx + w * high_idx
A = t.reshape(sigma.shape )
return t
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : torch.FloatTensor ,A_ : int ) -> torch.FloatTensor:
A = in_sigmas[-1].item()
A = in_sigmas[0].item()
A = 7.0 # 7.0 is the value used in the paper
A = np.linspace(0 ,1 ,A_ )
A = sigma_min ** (1 / rho)
A = sigma_max ** (1 / rho)
A = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
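    # Added illustrative staticmethod (hypothetical name, not part of the scheduler
    # API): the Karras et al. (2022) sigma schedule the method above computes,
    # restated standalone with rho = 7.0 as in the paper.
    @staticmethod
    def _karras_sigmas_sketch(sigma_min, sigma_max, num_steps, rho=7.0):
        import numpy as np
        ramp = np.linspace(0, 1, num_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        # interpolate in 1/rho-power space, then undo the power transform
        return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho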
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return self.dt is None
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : Union[float, torch.FloatTensor] ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : bool = True ,) -> Union[SchedulerOutput, Tuple]:
A = self.index_for_timestep(A_ )
# advance index counter by 1
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A = self.sigmas[step_index]
A = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A = self.sigmas[step_index - 1]
A = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A = 0
A = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A = sigma_hat if self.state_in_first_order else sigma_next
A = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A = sigma_hat if self.state_in_first_order else sigma_next
A = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A = model_output
else:
raise ValueError(
                F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`' )
if self.config.clip_sample:
A = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A = sigma_next - sigma_hat
# store for 2nd order step
A = derivative
A = dt
A = sample
else:
# 2. 2nd order / Heun's method
A = (sample - pred_original_sample) / sigma_next
A = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A = self.dt
A = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A = None
A = None
A = None
A = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A_ )
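    # Added illustrative staticmethod (hypothetical name): the two-stage Heun update
    # that the first/second-order branches above implement together, written for a
    # generic ODE dx/dsigma = f(x, sigma). A sketch, not the scheduler's public API.
    @staticmethod
    def _heun_step_sketch(f, x, sigma, sigma_next):
        d1 = f(x, sigma)                         # first stage: Euler slope
        x_euler = x + d1 * (sigma_next - sigma)  # Euler prediction
        d2 = f(x_euler, sigma_next)              # second stage: slope at the prediction
        return x + 0.5 * (d1 + d2) * (sigma_next - sigma)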
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(A_ ):
# mps does not support float64
A = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
A = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
A = self.timesteps.to(original_samples.device )
A = timesteps.to(original_samples.device )
A = [self.index_for_timestep(A_ ,A_ ) for t in timesteps]
A = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A = sigma.unsqueeze(-1 )
A = original_samples + noise * sigma
return noisy_samples
def __len__( self : Dict ) -> int:
return self.config.num_train_timesteps | 74 | 0 |
import itertools
import string
from collections.abc import Generator, Iterable
def UpperCAmelCase__ ( _A : Iterable[str] , _A : int ):
'''simple docstring'''
a__ =iter(snake_case__ )
while True:
a__ =tuple(itertools.islice(snake_case__ , snake_case__ ) )
if not chunk:
return
yield chunk
def UpperCAmelCase__ ( _A : str ):
'''simple docstring'''
a__ =''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
a__ =''''''
if len(snake_case__ ) < 2:
return dirty
for i in range(len(snake_case__ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(snake_case__ ) & 1:
clean += "X"
return clean
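# Added: a concrete expectation for the preparation step above (standard Playfair
# digraph rules: letters only, uppercase, split doubled letters with "X", pad to an
# even length), assuming the obfuscated names in it resolve as in the usual
# implementation. `prepare_input` is the name the encoder below calls it by.
def _prepare_input_demo():
    assert prepare_input("Hello World") == "HELXLOWORLDX"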
def UpperCAmelCase__ ( _A : str ):
'''simple docstring'''
a__ ='''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
a__ =[]
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
            table.append(char )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
            table.append(char )
return table
def UpperCAmelCase__ ( _A : str , _A : str ):
'''simple docstring'''
a__ =generate_table(snake_case__ )
a__ =prepare_input(snake_case__ )
a__ =''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(snake_case__ , 2 ):
        rowa, cola =divmod(table.index(chara ) , 5 )
        rowb, colb =divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else: # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
return ciphertext
def UpperCAmelCase__ ( _A : str , _A : str ):
'''simple docstring'''
a__ =generate_table(snake_case__ )
a__ =''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(snake_case__ , 2 ):
        rowa, cola =divmod(table.index(chara ) , 5 )
        rowb, colb =divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else: # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
return plaintext
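# Added: a self-contained round-trip sketch of the Playfair scheme the two functions
# above implement. `generate_table`, `prepare_input` and `chunker` are the helper
# names used in the calls above (their defs appear under obfuscated names);
# `_playfair_pair` and `_roundtrip_demo` are illustrative, assuming those helpers
# behave as in the standard implementation.
def _playfair_pair(table, a, b, shift):
    rowa, cola = divmod(table.index(a), 5)
    rowb, colb = divmod(table.index(b), 5)
    if rowa == rowb:  # same row: step along the row
        return table[rowa * 5 + (cola + shift) % 5] + table[rowb * 5 + (colb + shift) % 5]
    if cola == colb:  # same column: step along the column
        return table[((rowa + shift) % 5) * 5 + cola] + table[((rowb + shift) % 5) * 5 + colb]
    return table[rowa * 5 + colb] + table[rowb * 5 + cola]  # rectangle: swap columns
def _roundtrip_demo():
    table = generate_table("Secret")
    msg = prepare_input("Hello World")
    cipher = "".join(_playfair_pair(table, a, b, +1) for a, b in chunker(msg, 2))
    plain = "".join(_playfair_pair(table, a, b, -1) for a, b in chunker(cipher, 2))
    return plain == msg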
| 188 |
"""simple docstring"""
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ,A_ : list[int] ) -> None:
A = len(A_ )
A = [0] * len_array
if len_array > 0:
A = array[0]
for i in range(1 ,A_ ):
A = self.prefix_sum[i - 1] + array[i]
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ) -> bool:
A = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(A_ )
return False
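# Added: a self-contained sketch of the two operations the class above provides
# (its name and method names are obfuscated in this dump): O(1) range sums after an
# O(n) prefix pass, and the "seen prefix" set trick for subarray-sum membership.
def _prefix_sum_demo():
    array = [1, 2, 3, 4]
    prefix = []
    running = 0
    for value in array:
        running += value
        prefix.append(running)  # prefix[i] = sum(array[: i + 1])
    range_sum = prefix[3] - prefix[0]  # sum of array[1:4] = 2 + 3 + 4
    assert range_sum == 9
    seen, target, found = {0}, 5, False
    for p in prefix:  # does any contiguous subarray sum to target?
        found = found or (p - target) in seen
        seen.add(p)
    assert found  # 2 + 3 == 5
    return range_sum, found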
if __name__ == "__main__":
import doctest
doctest.testmod() | 74 | 0 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCAmelCase : str = logging.getLogger(__name__)
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""" , type=snake_case__ , default="""data/dump.txt""" , help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""" , type=snake_case__ , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""" , type=snake_case__ , default="""bert-base-uncased""" , help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""" , type=snake_case__ , default="""data/dump""" , help="""The dump file prefix.""" )
__SCREAMING_SNAKE_CASE = parser.parse_args()
logger.info(F'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
__SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained(args.tokenizer_name )
__SCREAMING_SNAKE_CASE = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
__SCREAMING_SNAKE_CASE = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
__SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained(args.tokenizer_name )
__SCREAMING_SNAKE_CASE = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
__SCREAMING_SNAKE_CASE = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
__SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained(args.tokenizer_name )
__SCREAMING_SNAKE_CASE = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
__SCREAMING_SNAKE_CASE = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(F'Loading text from {args.file_path}' )
with open(args.file_path , """r""" , encoding="""utf8""" ) as fp:
__SCREAMING_SNAKE_CASE = fp.readlines()
logger.info("""Start encoding""" )
logger.info(F'{len(snake_case__ )} examples to process.' )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1_00_00
__SCREAMING_SNAKE_CASE = time.time()
for text in data:
__SCREAMING_SNAKE_CASE = F'{bos} {text.strip()} {sep}'
__SCREAMING_SNAKE_CASE = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
rslt.append(snake_case__ )
iter += 1
if iter % interval == 0:
__SCREAMING_SNAKE_CASE = time.time()
logger.info(F'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
__SCREAMING_SNAKE_CASE = time.time()
logger.info("""Finished binarization""" )
logger.info(F'{len(snake_case__ )} examples processed.' )
__SCREAMING_SNAKE_CASE = F'{args.dump_file}.{args.tokenizer_name}.pickle'
__SCREAMING_SNAKE_CASE = tokenizer.vocab_size
if vocab_size < (1 << 16):
__SCREAMING_SNAKE_CASE = [np.uintaa(snake_case__ ) for d in rslt]
else:
__SCREAMING_SNAKE_CASE = [np.intaa(snake_case__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F'Dump to {dp_file}' )
with open(snake_case__ , """wb""" ) as handle:
pickle.dump(rslt_ , snake_case__ , protocol=pickle.HIGHEST_PROTOCOL )
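# Added: the dtype decision above in isolation -- token ids are stored as uint16 when
# the vocabulary fits in 16 bits (true for BERT's ~30k vocab), halving the pickle
# size, and as int32 otherwise. A sketch; the function name is illustrative.
def _pack_token_ids(rslt, vocab_size):
    import numpy as np
    dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
    return [np.asarray(ids, dtype=dtype) for ids in rslt]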
if __name__ == "__main__":
main()
| 267 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = RobertaPreLayerNormConfig.from_pretrained(
snake_case__ , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
A = torch.load(hf_hub_download(repo_id=snake_case__ , filename='pytorch_model.bin' ) )
A = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.' ):
A = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
A = tensor_value
A = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ )
model.save_pretrained(snake_case__ )
# convert tokenizer
A = AutoTokenizer.from_pretrained(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 74 | 0 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class a_ :
"""simple docstring"""
@staticmethod
def _lowerCAmelCase ( *snake_case : Optional[int] ,**snake_case : List[Any] ):
pass
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
_lowerCamelCase =(
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : int ,snake_case : List[Any] ,snake_case : Dict ):
SCREAMING_SNAKE_CASE =pipeline(
'document-question-answering' ,model=A_ ,tokenizer=A_ ,image_processor=A_ )
SCREAMING_SNAKE_CASE =INVOICE_URL
SCREAMING_SNAKE_CASE =list(zip(*apply_tesseract(load_image(A_ ) ,A_ ,'' ) ) )
SCREAMING_SNAKE_CASE ='What is the placebo?'
SCREAMING_SNAKE_CASE =[
{
'image': load_image(A_ ),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : Dict ,snake_case : List[Any] ):
SCREAMING_SNAKE_CASE =dqa_pipeline(A_ ,top_k=2 )
self.assertEqual(
A_ ,[
[
{'score': ANY(A_ ), 'answer': ANY(A_ ), 'start': ANY(A_ ), 'end': ANY(A_ )},
{'score': ANY(A_ ), 'answer': ANY(A_ ), 'start': ANY(A_ ), 'end': ANY(A_ )},
]
]
* 3 ,)
@require_torch
@require_detectrona
@require_pytesseract
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =pipeline('document-question-answering' ,model='hf-internal-testing/tiny-random-layoutlmv2' )
SCREAMING_SNAKE_CASE =INVOICE_URL
SCREAMING_SNAKE_CASE ='How many cats are there?'
SCREAMING_SNAKE_CASE =[
{'score': 0.0_001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.0_001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
SCREAMING_SNAKE_CASE =dqa_pipeline(image=A_ ,question=A_ ,top_k=2 )
self.assertEqual(nested_simplify(A_ ,decimals=4 ) ,A_ )
SCREAMING_SNAKE_CASE =dqa_pipeline({'image': image, 'question': question} ,top_k=2 )
self.assertEqual(nested_simplify(A_ ,decimals=4 ) ,A_ )
        # No text is detected in this image, so layoutlmv2 should fail.
        # The answer should be empty.
SCREAMING_SNAKE_CASE ='./tests/fixtures/tests_samples/COCO/000000039769.png'
SCREAMING_SNAKE_CASE =dqa_pipeline(image=A_ ,question=A_ ,top_k=2 )
self.assertEqual(A_ ,[] )
        # We can optionally pass the words and bounding boxes directly
SCREAMING_SNAKE_CASE ='./tests/fixtures/tests_samples/COCO/000000039769.png'
SCREAMING_SNAKE_CASE =[]
SCREAMING_SNAKE_CASE =[]
SCREAMING_SNAKE_CASE =dqa_pipeline(image=A_ ,question=A_ ,words=A_ ,boxes=A_ ,top_k=2 )
self.assertEqual(A_ ,[] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =pipeline(
'document-question-answering' ,model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' ,revision='9977165' ,)
SCREAMING_SNAKE_CASE =INVOICE_URL
SCREAMING_SNAKE_CASE ='What is the invoice number?'
SCREAMING_SNAKE_CASE =dqa_pipeline(image=A_ ,question=A_ ,top_k=2 )
self.assertEqual(
nested_simplify(A_ ,decimals=4 ) ,[
{'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE =dqa_pipeline({'image': image, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(A_ ,decimals=4 ) ,[
{'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE =dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
nested_simplify(A_ ,decimals=4 ) ,[
[
{'score': 0.9_944, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_009, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 ,)
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =pipeline(
'document-question-answering' ,model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' ,revision='9977165' ,max_seq_len=50 ,)
SCREAMING_SNAKE_CASE =INVOICE_URL
SCREAMING_SNAKE_CASE ='What is the invoice number?'
SCREAMING_SNAKE_CASE =dqa_pipeline(image=A_ ,question=A_ ,top_k=2 )
self.assertEqual(
nested_simplify(A_ ,decimals=4 ) ,[
{'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE =dqa_pipeline({'image': image, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(A_ ,decimals=4 ) ,[
{'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE =dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
nested_simplify(A_ ,decimals=4 ) ,[
[
{'score': 0.9_974, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_948, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 ,)
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' ,revision='3dc6de3' ,add_prefix_space=A_ )
SCREAMING_SNAKE_CASE =pipeline(
'document-question-answering' ,model='impira/layoutlm-document-qa' ,tokenizer=A_ ,revision='3dc6de3' ,)
SCREAMING_SNAKE_CASE =INVOICE_URL
SCREAMING_SNAKE_CASE ='What is the invoice number?'
SCREAMING_SNAKE_CASE =dqa_pipeline(image=A_ ,question=A_ ,top_k=2 )
self.assertEqual(
nested_simplify(A_ ,decimals=4 ) ,[
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
] ,)
SCREAMING_SNAKE_CASE =dqa_pipeline({'image': image, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(A_ ,decimals=4 ) ,[
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
] ,)
SCREAMING_SNAKE_CASE =dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
nested_simplify(A_ ,decimals=4 ) ,[
[
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 ,)
SCREAMING_SNAKE_CASE =list(zip(*apply_tesseract(load_image(A_ ) ,A_ ,'' ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE =dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(A_ ,decimals=4 ) ,[
{'score': 0.4_251, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_819, 'answer': '1110212019', 'start': 23, 'end': 23},
] ,)
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' ,revision='3dc6de3' ,add_prefix_space=A_ )
SCREAMING_SNAKE_CASE =pipeline(
'document-question-answering' ,model='impira/layoutlm-document-qa' ,tokenizer=A_ ,revision='3dc6de3' ,max_seq_len=50 ,)
SCREAMING_SNAKE_CASE =INVOICE_URL
SCREAMING_SNAKE_CASE ='What is the invoice number?'
SCREAMING_SNAKE_CASE =dqa_pipeline(image=A_ ,question=A_ ,top_k=2 )
self.assertEqual(
nested_simplify(A_ ,decimals=4 ) ,[
{'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
SCREAMING_SNAKE_CASE =dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
nested_simplify(A_ ,decimals=4 ) ,[
[
{'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 ,)
SCREAMING_SNAKE_CASE =list(zip(*apply_tesseract(load_image(A_ ) ,A_ ,'' ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE =dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} ,top_k=2 )
self.assertEqual(
nested_simplify(A_ ,decimals=4 ) ,[
{'score': 0.9_999, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_998, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
@slow
@require_torch
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =pipeline(
'document-question-answering' ,model='naver-clova-ix/donut-base-finetuned-docvqa' ,tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) ,feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' ,)
SCREAMING_SNAKE_CASE =INVOICE_URL
SCREAMING_SNAKE_CASE ='What is the invoice number?'
SCREAMING_SNAKE_CASE =dqa_pipeline(image=A_ ,question=A_ ,top_k=2 )
self.assertEqual(nested_simplify(A_ ,decimals=4 ) ,[{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def _lowerCAmelCase ( self : Any ):
pass
| 334 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''roformer'''
def __init__( self : Tuple ,A_ : Optional[int]=5_0000 ,A_ : Tuple=None ,A_ : Optional[Any]=768 ,A_ : Dict=12 ,A_ : Optional[int]=12 ,A_ : Union[str, Any]=3072 ,A_ : Dict="gelu" ,A_ : Dict=0.1 ,A_ : List[Any]=0.1 ,A_ : List[Any]=1536 ,A_ : List[str]=2 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : Optional[int]=0 ,A_ : List[str]=False ,A_ : Tuple=True ,**A_ : List[str] ,) -> Dict:
super().__init__(pad_token_id=A_ ,**A_ )
A = vocab_size
A = hidden_size if embedding_size is None else embedding_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = rotary_value
A = use_cache
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 74 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class UpperCamelCase__( _lowercase ):
lowerCAmelCase__ : List[Any] = '''transfo-xl'''
lowerCAmelCase__ : Tuple = ['''mems''']
lowerCAmelCase__ : Tuple = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,__UpperCAmelCase=26_77_35 ,__UpperCAmelCase=[2_00_00, 4_00_00, 20_00_00] ,__UpperCAmelCase=10_24 ,__UpperCAmelCase=10_24 ,__UpperCAmelCase=16 ,__UpperCAmelCase=64 ,__UpperCAmelCase=40_96 ,__UpperCAmelCase=4 ,__UpperCAmelCase=False ,__UpperCAmelCase=18 ,__UpperCAmelCase=16_00 ,__UpperCAmelCase=10_00 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=0 ,__UpperCAmelCase=-1 ,__UpperCAmelCase=True ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=True ,__UpperCAmelCase="normal" ,__UpperCAmelCase=0.0_1 ,__UpperCAmelCase=0.0_1 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1e-5 ,__UpperCAmelCase=0 ,**__UpperCAmelCase ,) -> int:
A__ = vocab_size
A__ = []
self.cutoffs.extend(A_ )
if proj_share_all_but_first:
A__ = [False] + [True] * len(self.cutoffs )
else:
A__ = [False] + [False] * len(self.cutoffs )
A__ = d_model
A__ = d_embed
A__ = d_head
A__ = d_inner
A__ = div_val
A__ = pre_lnorm
A__ = n_layer
A__ = n_head
A__ = mem_len
A__ = same_length
A__ = attn_type
A__ = clamp_len
A__ = sample_softmax
A__ = adaptive
A__ = dropout
A__ = dropatt
A__ = untie_r
A__ = init
A__ = init_range
A__ = proj_init_std
A__ = init_std
A__ = layer_norm_epsilon
super().__init__(eos_token_id=A_ ,**A_ )
@property
def snake_case__ ( self ) -> Optional[int]:
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def snake_case__ ( self ,__UpperCAmelCase ) -> str:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 221 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _snake_case ( snake_case__ : Dict ):
A = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def _snake_case ( snake_case__ : int ):
A , A = emb.weight.shape
A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
A = emb.weight.data
return lin_layer
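# Added: what the helper above does, restated -- build an output projection whose
# weights are tied to an embedding matrix. A standalone sketch of weight tying with
# illustrative names, not part of the conversion script.
def _tied_lm_head_demo():
    import torch
    from torch import nn
    emb = nn.Embedding(10, 4)
    vocab_size, emb_size = emb.weight.shape
    lm_head = nn.Linear(emb_size, vocab_size, bias=False)
    lm_head.weight.data = emb.weight.data  # share storage: tied weights
    assert torch.equal(lm_head.weight, emb.weight)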
def _snake_case ( snake_case__ : List[str] , snake_case__ : Any="facebook/mbart-large-en-ro" , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False ):
A = torch.load(snake_case__ , map_location='cpu' )['model']
remove_ignore_keys_(snake_case__ )
A = state_dict['encoder.embed_tokens.weight'].shape[0]
A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ )
if mbart_aa and finetuned:
A = 'relu'
A = state_dict['decoder.embed_tokens.weight']
A = MBartForConditionalGeneration(snake_case__ )
model.model.load_state_dict(snake_case__ )
if finetuned:
A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
_lowercase = parser.parse_args()
_lowercase = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path) | 74 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE : str = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[Any] = ["""OwlViTFeatureExtractor"""]
SCREAMING_SNAKE_CASE : List[Any] = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 102 |
"""simple docstring"""
import argparse
import struct
import unittest
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Tuple ,A_ : bytes ) -> None:
A = data
# Initialize hash values
A = [
0X6_A_0_9_E_6_6_7,
0XB_B_6_7_A_E_8_5,
0X3_C_6_E_F_3_7_2,
0XA_5_4_F_F_5_3_A,
0X5_1_0_E_5_2_7_F,
0X9_B_0_5_6_8_8_C,
0X1_F_8_3_D_9_A_B,
0X5_B_E_0_C_D_1_9,
]
# Initialize round constants
A = [
0X4_2_8_A_2_F_9_8,
0X7_1_3_7_4_4_9_1,
0XB_5_C_0_F_B_C_F,
0XE_9_B_5_D_B_A_5,
0X3_9_5_6_C_2_5_B,
0X5_9_F_1_1_1_F_1,
0X9_2_3_F_8_2_A_4,
0XA_B_1_C_5_E_D_5,
0XD_8_0_7_A_A_9_8,
0X1_2_8_3_5_B_0_1,
0X2_4_3_1_8_5_B_E,
0X5_5_0_C_7_D_C_3,
0X7_2_B_E_5_D_7_4,
0X8_0_D_E_B_1_F_E,
0X9_B_D_C_0_6_A_7,
0XC_1_9_B_F_1_7_4,
0XE_4_9_B_6_9_C_1,
0XE_F_B_E_4_7_8_6,
0X0_F_C_1_9_D_C_6,
0X2_4_0_C_A_1_C_C,
0X2_D_E_9_2_C_6_F,
0X4_A_7_4_8_4_A_A,
0X5_C_B_0_A_9_D_C,
0X7_6_F_9_8_8_D_A,
0X9_8_3_E_5_1_5_2,
0XA_8_3_1_C_6_6_D,
0XB_0_0_3_2_7_C_8,
0XB_F_5_9_7_F_C_7,
0XC_6_E_0_0_B_F_3,
0XD_5_A_7_9_1_4_7,
0X0_6_C_A_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_B_7_0_A_8_5,
0X2_E_1_B_2_1_3_8,
0X4_D_2_C_6_D_F_C,
0X5_3_3_8_0_D_1_3,
0X6_5_0_A_7_3_5_4,
0X7_6_6_A_0_A_B_B,
0X8_1_C_2_C_9_2_E,
0X9_2_7_2_2_C_8_5,
0XA_2_B_F_E_8_A_1,
0XA_8_1_A_6_6_4_B,
0XC_2_4_B_8_B_7_0,
0XC_7_6_C_5_1_A_3,
0XD_1_9_2_E_8_1_9,
0XD_6_9_9_0_6_2_4,
0XF_4_0_E_3_5_8_5,
0X1_0_6_A_A_0_7_0,
0X1_9_A_4_C_1_1_6,
0X1_E_3_7_6_C_0_8,
0X2_7_4_8_7_7_4_C,
0X3_4_B_0_B_C_B_5,
0X3_9_1_C_0_C_B_3,
0X4_E_D_8_A_A_4_A,
0X5_B_9_C_C_A_4_F,
0X6_8_2_E_6_F_F_3,
0X7_4_8_F_8_2_E_E,
0X7_8_A_5_6_3_6_F,
0X8_4_C_8_7_8_1_4,
0X8_C_C_7_0_2_0_8,
0X9_0_B_E_F_F_F_A,
0XA_4_5_0_6_C_E_B,
0XB_E_F_9_A_3_F_7,
0XC_6_7_1_7_8_F_2,
]
A = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : bytes ) -> bytes:
A = B'\x80' + (B'\x00' * (63 - (len(A_ ) + 8) % 64))
A = struct.pack('>Q' ,(len(A_ ) * 8) )
return data + padding + big_endian_integer
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
# Convert into blocks of 64 bytes
A = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A = list(struct.unpack('>16L' ,A_ ) )
# add 48 0-ed integers
words += [0] * 48
A , A , A , A , A , A , A , A = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
A = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
A = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
A = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_0_0_0_0_0_0_0_0
# Compression
A = self.ror(A_ ,6 ) ^ self.ror(A_ ,11 ) ^ self.ror(A_ ,25 )
A = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g)
A = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0_0_0_0_0_0_0_0
A = self.ror(A_ ,2 ) ^ self.ror(A_ ,13 ) ^ self.ror(A_ ,22 )
A = (a & b) ^ (a & c) ^ (b & c)
A = (sa + maj) % 0X1_0_0_0_0_0_0_0_0
A , A , A , A , A , A , A , A = (
g,
f,
e,
((d + tempa) % 0X1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0),
)
A = [a, b, c, d, e, f, g, h]
# Modify final values
A = [
((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
A = ''.join([hex(A_ )[2:].zfill(8 ) for value in self.hashes] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : int ) -> int:
return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
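# Added: the 32-bit right-rotation used throughout the compression loop above,
# restated standalone with a sanity check (the function name is illustrative).
def _rotr32(value, rotations):
    return 0xFFFFFFFF & ((value << (32 - rotations)) | (value >> rotations))
assert _rotr32(0x00000001, 1) == 0x80000000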
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> None:
import hashlib
A = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(A_ ).hash ,hashlib.shaaaa(A_ ).hexdigest() )
def _snake_case ( ):
import doctest
doctest.testmod()
A = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
A = parser.parse_args()
A = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
A = f.read()
else:
A = bytes(snake_case__ , 'utf-8' )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
main() | 74 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class lowerCamelCase_ ( _lowercase ):
'''simple docstring'''
a__ : Union[str, Any] = '''instructblip_vision_model'''
def __init__( self , __lowercase=1_408 , __lowercase=6_144 , __lowercase=39 , __lowercase=16 , __lowercase=224 , __lowercase=14 , __lowercase="gelu" , __lowercase=1E-6 , __lowercase=0.0 , __lowercase=1E-1_0 , __lowercase=True , **__lowercase , ) -> Tuple:
super().__init__(**A_)
__UpperCamelCase :Union[str, Any] = hidden_size
__UpperCamelCase :Any = intermediate_size
__UpperCamelCase :Union[str, Any] = num_hidden_layers
__UpperCamelCase :Optional[int] = num_attention_heads
__UpperCamelCase :int = patch_size
__UpperCamelCase :List[str] = image_size
__UpperCamelCase :Any = initializer_range
__UpperCamelCase :Union[str, Any] = attention_dropout
__UpperCamelCase :Tuple = layer_norm_eps
__UpperCamelCase :List[Any] = hidden_act
__UpperCamelCase :Optional[Any] = qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __lowercase , **__lowercase) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_)
__UpperCamelCase , __UpperCamelCase :List[str] = cls.get_config_dict(A_ , **A_)
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''') == "instructblip":
__UpperCamelCase :Dict = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(A_ , **A_)
class lowerCamelCase_ ( _lowercase ):
'''simple docstring'''
a__ : Union[str, Any] = '''instructblip_qformer'''
def __init__( self , __lowercase=30_522 , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3_072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=0.02 , __lowercase=1E-1_2 , __lowercase=0 , __lowercase="absolute" , __lowercase=2 , __lowercase=1_408 , **__lowercase , ) -> Tuple:
super().__init__(pad_token_id=A_ , **A_)
__UpperCamelCase :Any = vocab_size
__UpperCamelCase :int = hidden_size
__UpperCamelCase :Union[str, Any] = num_hidden_layers
__UpperCamelCase :Any = num_attention_heads
__UpperCamelCase :int = hidden_act
__UpperCamelCase :List[Any] = intermediate_size
__UpperCamelCase :Any = hidden_dropout_prob
__UpperCamelCase :Tuple = attention_probs_dropout_prob
__UpperCamelCase :List[str] = max_position_embeddings
__UpperCamelCase :Optional[Any] = initializer_range
__UpperCamelCase :List[Any] = layer_norm_eps
__UpperCamelCase :Optional[Any] = position_embedding_type
__UpperCamelCase :Tuple = cross_attention_frequency
__UpperCamelCase :Union[str, Any] = encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __lowercase , **__lowercase) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_)
__UpperCamelCase , __UpperCamelCase :Any = cls.get_config_dict(A_ , **A_)
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''') == "instructblip":
__UpperCamelCase :Optional[Any] = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(A_ , **A_)
class lowerCamelCase_ ( _lowercase ):
'''simple docstring'''
a__ : Dict = '''instructblip'''
a__ : List[Any] = True
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=32 , **__lowercase) -> Optional[int]:
super().__init__(**A_)
if vision_config is None:
__UpperCamelCase :Dict = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''')
if qformer_config is None:
__UpperCamelCase :Any = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''')
if text_config is None:
__UpperCamelCase :Dict = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''')
__UpperCamelCase :Optional[Any] = InstructBlipVisionConfig(**A_)
__UpperCamelCase :Dict = InstructBlipQFormerConfig(**A_)
__UpperCamelCase :List[str] = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
__UpperCamelCase :int = CONFIG_MAPPING[text_model_type](**A_)
__UpperCamelCase :List[str] = self.text_config.tie_word_embeddings
__UpperCamelCase :List[str] = self.text_config.is_encoder_decoder
__UpperCamelCase :Optional[int] = num_query_tokens
__UpperCamelCase :List[str] = self.vision_config.hidden_size
__UpperCamelCase :str = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__UpperCamelCase :Union[str, Any] = 1.0
__UpperCamelCase :Union[str, Any] = 0.02
@classmethod
def UpperCamelCase__ ( cls , __lowercase , __lowercase , __lowercase , **__lowercase , ) -> List[Any]:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **A_ , )
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :List[Any] = copy.deepcopy(self.__dict__)
__UpperCamelCase :Any = self.vision_config.to_dict()
__UpperCamelCase :Any = self.qformer_config.to_dict()
__UpperCamelCase :Dict = self.text_config.to_dict()
__UpperCamelCase :List[str] = self.__class__.model_type
return output
| 43 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowercase = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''DeiTFeatureExtractor''']
_lowercase = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 74 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class __magic_name__ ( _lowercase):
A: Tuple = '''markuplm'''
def __init__( self : Any , lowerCamelCase__ : List[Any]=30522 , lowerCamelCase__ : Tuple=768 , lowerCamelCase__ : Dict=12 , lowerCamelCase__ : Tuple=12 , lowerCamelCase__ : List[Any]=3072 , lowerCamelCase__ : Dict="gelu" , lowerCamelCase__ : List[str]=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : List[str]=512 , lowerCamelCase__ : Dict=2 , lowerCamelCase__ : Optional[int]=0.02 , lowerCamelCase__ : Optional[Any]=1E-1_2 , lowerCamelCase__ : List[Any]=0 , lowerCamelCase__ : Optional[int]=0 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Optional[int]=256 , lowerCamelCase__ : Dict=1024 , lowerCamelCase__ : Optional[Any]=216 , lowerCamelCase__ : str=1001 , lowerCamelCase__ : Any=32 , lowerCamelCase__ : Optional[int]=50 , lowerCamelCase__ : Any="absolute" , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : List[Any]=None , **lowerCamelCase__ : int , ) -> Dict:
'''simple docstring'''
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ , )
UpperCamelCase__ : Optional[int] = vocab_size
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Optional[int] = num_hidden_layers
UpperCamelCase__ : int = num_attention_heads
UpperCamelCase__ : str = hidden_act
UpperCamelCase__ : Optional[Any] = intermediate_size
UpperCamelCase__ : int = hidden_dropout_prob
UpperCamelCase__ : int = attention_probs_dropout_prob
UpperCamelCase__ : List[Any] = max_position_embeddings
UpperCamelCase__ : List[str] = type_vocab_size
UpperCamelCase__ : List[str] = initializer_range
UpperCamelCase__ : Tuple = layer_norm_eps
UpperCamelCase__ : int = position_embedding_type
UpperCamelCase__ : Any = use_cache
UpperCamelCase__ : Any = classifier_dropout
# additional properties
UpperCamelCase__ : str = max_depth
UpperCamelCase__ : Any = max_xpath_tag_unit_embeddings
UpperCamelCase__ : List[Any] = max_xpath_subs_unit_embeddings
UpperCamelCase__ : List[Any] = tag_pad_id
UpperCamelCase__ : str = subs_pad_id
UpperCamelCase__ : List[Any] = xpath_unit_hidden_size
| 146 |
"""simple docstring"""
from __future__ import annotations
import requests
def _snake_case ( snake_case__ : str ):
A = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(snake_case__ ).json()
def _snake_case ( snake_case__ : int = 10 ):
A = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
A = requests.get(snake_case__ ).json()[:max_stories]
return [get_hackernews_story(snake_case__ ) for story_id in story_ids]
def _snake_case ( snake_case__ : int = 10 ):
A = hackernews_top_stories(snake_case__ )
return "\n".join('* [{title}]({url})'.format(**snake_case__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown()) | 74 | 0 |
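A readable sketch of the Hacker News snippet above may help: the obfuscation collapses every local name into `A`/`snake_case__`, which hides the actual data flow. The endpoint URLs and the `title`/`url` dictionary keys come verbatim from the row; all variable and function names below are reconstructed assumptions, not the repository's originals.

from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    # Fetch a single story record from the Firebase API.
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url, timeout=10).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    # The topstories endpoint returns a list of story ids.
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url, timeout=10).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)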
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
UpperCAmelCase_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
UpperCAmelCase_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
UpperCAmelCase_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
UpperCAmelCase_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
UpperCAmelCase_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
for tf_name, hf_name in patterns:
UpperCAmelCase__ = k.replace(snake_case__ , snake_case__ )
return k
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : dict ):
'''simple docstring'''
UpperCAmelCase__ = BigBirdPegasusConfig(**snake_case__ )
UpperCAmelCase__ = BigBirdPegasusForConditionalGeneration(snake_case__ )
UpperCAmelCase__ = torch_model.state_dict()
UpperCAmelCase__ = {}
# separating decoder weights
UpperCAmelCase__ = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
UpperCAmelCase__ = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ):
UpperCAmelCase__ = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
UpperCAmelCase__ = DECODER_PATTERNS
UpperCAmelCase__ = rename_state_dict_key(snake_case__ , snake_case__ )
if new_k not in state_dict:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
UpperCAmelCase__ = v.T
UpperCAmelCase__ = torch.from_numpy(snake_case__ )
assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ):
UpperCAmelCase__ = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
UpperCAmelCase__ = REMAINING_PATTERNS
UpperCAmelCase__ = rename_state_dict_key(snake_case__ , snake_case__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
UpperCAmelCase__ = v.T
UpperCAmelCase__ = torch.from_numpy(snake_case__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
UpperCAmelCase__ = mapping["""model.embed_positions.weight"""]
UpperCAmelCase__ = mapping.pop("""model.embed_positions.weight""" )
UpperCAmelCase__ , UpperCAmelCase__ = torch_model.load_state_dict(snake_case__ , strict=snake_case__ )
UpperCAmelCase__ = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ = tf.train.list_variables(snake_case__ )
UpperCAmelCase__ = {}
UpperCAmelCase__ = ["""global_step"""]
for name, shape in tqdm(snake_case__ , desc="""converting tf checkpoint to dict""" ):
UpperCAmelCase__ = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCAmelCase__ = tf.train.load_variable(snake_case__ , snake_case__ )
UpperCAmelCase__ = array
return tf_weights
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : dict ):
'''simple docstring'''
UpperCAmelCase__ = get_tf_weights_as_numpy(snake_case__ )
UpperCAmelCase__ = convert_bigbird_pegasus(snake_case__ , snake_case__ )
torch_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 346 |
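The heart of the converter above is `rename_state_dict_key`: each `(tf_name, hf_name)` pair is applied in order as a plain substring replacement. Isolated below with a shortened pattern list and a hypothetical checkpoint key, purely for illustration.

def rename_state_dict_key(k: str, patterns: list) -> str:
    # Each (tf_name, hf_name) pair is a plain substring replacement, applied in order.
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("pegasus", "model")]
print(rename_state_dict_key("pegasus/decoder/layer_0/self/query/kernel", patterns))
# -> model.decoder.layers.0.self.query.weight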
"""simple docstring"""
from string import ascii_uppercase
_lowercase = {char: i for i, char in enumerate(ascii_uppercase)}
_lowercase = dict(enumerate(ascii_uppercase))
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = len(snake_case__ )
A = 0
while True:
if x == i:
A = 0
if len(snake_case__ ) == len(snake_case__ ):
break
key += key[i]
i += 1
return key
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = ''
A = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
A = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = ''
A = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
A = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def _snake_case ( ):
A = 'THE GERMAN ATTACK'
A = 'SECRET'
A = generate_key(snake_case__ , snake_case__ )
A = cipher_text(snake_case__ , snake_case__ )
print(F'Encrypted Text = {s}' )
print(F'Original Text = {original_text(snake_case__ , snake_case__ )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 74 | 0 |
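The row above is a running-key (Vigenère-style) cipher over A-Z that passes spaces through unchanged; note it subtracts the key stream to encrypt and adds it back to decrypt, the reverse of the textbook convention. A de-obfuscated sketch follows, with the key-extension loop simplified to a cyclic repeat; the names are mine.

from string import ascii_uppercase

CHAR_TO_INDEX = {char: i for i, char in enumerate(ascii_uppercase)}
INDEX_TO_CHAR = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    # Repeat the key cyclically until it covers the whole message.
    return (key * (len(message) // len(key) + 1))[: len(message)]


def encrypt(message: str, key_new: str) -> str:
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (CHAR_TO_INDEX[letter] - CHAR_TO_INDEX[key_new[i]]) % 26
            i += 1
            cipher_text += INDEX_TO_CHAR[x]
    return cipher_text


def decrypt(cipher_text: str, key_new: str) -> str:
    original_text = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            original_text += " "
        else:
            x = (CHAR_TO_INDEX[letter] + CHAR_TO_INDEX[key_new[i]] + 26) % 26
            i += 1
            original_text += INDEX_TO_CHAR[x]
    return original_text


key = generate_key("THE GERMAN ATTACK", "SECRET")
s = encrypt("THE GERMAN ATTACK", key)
print(s, "->", decrypt(s, key))  # round-trips back to THE GERMAN ATTACK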
"""simple docstring"""
class snake_case :
'''simple docstring'''
def __init__( self : Union[str, Any], _lowerCamelCase : list ):
'''simple docstring'''
__A = set_counts
__A = max(A_ )
__A = len(A_ )
__A = [1] * num_sets
__A = list(range(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : int, _lowerCamelCase : int ):
'''simple docstring'''
__A = self.get_parent(A_ )
__A = self.get_parent(A_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
__A = 0
__A = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
__A = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
__A = 0
__A = src_parent
__A = self.set_counts[src_parent]
__A = max(self.max_set, A_ )
return True
def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : int ):
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
__A = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 266 |
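Translated back into readable names, the class above is a weighted union-find: `merge` unions by rank, `get_parent` compresses paths, and `max_set` tracks the largest set size seen so far. The identifier names in this sketch are reconstructed, not the originals.

class DisjointSet:
    """Weighted union-find: union by rank, path compression, running max set size."""

    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # Attach src's root under dst's root and move its element count over.
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined)
        return True

    def get_parent(self, disj_set: int) -> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])  # path compression
        return self.parents[disj_set]


ds = DisjointSet([1, 1, 1])
ds.merge(0, 1)
ds.merge(1, 2)
print(ds.max_set)  # 3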
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ) -> Optional[Any]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
A = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
A = 'sgugger/tiny-distilbert-classification'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,only_pretrain_model=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,torchscript=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,fpaa=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
# set architectures equal to `None`
A = None
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=A_ ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = 'sshleifer/tinier_bart'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = 'sshleifer/tinier_bart'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
A = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,save_to_csv=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(A_ ,'inf_time.csv' ) ,train_memory_csv_file=os.path.join(A_ ,'train_mem.csv' ) ,inference_memory_csv_file=os.path.join(A_ ,'inf_mem.csv' ) ,train_time_csv_file=os.path.join(A_ ,'train_time.csv' ) ,env_info_csv_file=os.path.join(A_ ,'env.csv' ) ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
benchmark.run()
self.assertTrue(Path(os.path.join(A_ ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'env.csv' ) ).exists() )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(A_ : Optional[int] ):
self.assertTrue(hasattr(A_ ,'sequential' ) )
self.assertTrue(hasattr(A_ ,'cumulative' ) )
self.assertTrue(hasattr(A_ ,'current' ) )
self.assertTrue(hasattr(A_ ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(A_ ,'log.txt' ) ,log_print=A_ ,trace_memory_line_by_line=A_ ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A_ ,'log.txt' ) ).exists() ) | 74 | 0 |
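Stripped of the test harness, the usage pattern these tests verify looks like the sketch below; the model id and sizes are copied from the tests, and note that recent transformers releases mark these benchmark utilities as deprecated.

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],  # a tiny checkpoint, as in the tests
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = PyTorchBenchmark(args)
results = benchmark.run()
print(results.time_inference_result)
print(results.memory_inference_result)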
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a__ ( _lowercase ):
"""simple docstring"""
__lowerCamelCase = '''levit'''
def __init__( self , lowercase=224 , lowercase=3 , lowercase=3 , lowercase=2 , lowercase=1 , lowercase=16 , lowercase=[128, 256, 384] , lowercase=[4, 8, 12] , lowercase=[4, 4, 4] , lowercase=[16, 16, 16] , lowercase=0 , lowercase=[2, 2, 2] , lowercase=[2, 2, 2] , lowercase=0.02 , **lowercase , ) -> Dict:
'''simple docstring'''
super().__init__(**A_ )
A__ = image_size
A__ = num_channels
A__ = kernel_size
A__ = stride
A__ = padding
A__ = hidden_sizes
A__ = num_attention_heads
A__ = depths
A__ = key_dim
A__ = drop_path_rate
A__ = patch_size
A__ = attention_ratio
A__ = mlp_ratio
A__ = initializer_range
A__ = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a__ ( _lowercase ):
"""simple docstring"""
__lowerCamelCase = version.parse('1.11' )
@property
def UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCamelCase ( self ) -> float:
'''simple docstring'''
return 1e-4
| 68 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_lowercase = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: str
_lowerCamelCase: Optional[str] = None
_lowerCamelCase: Optional[Union[str, int]] = None
_lowerCamelCase: Optional[Union[str, int]] = None
_lowerCamelCase: Optional[Union[str, int]] = None
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
A , A , A = _str_to_version_tuple(self.version_str )
def __repr__( self : Optional[int] ) -> Dict:
return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return self.major, self.minor, self.patch
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple ) -> Union[str, Any]:
if isinstance(A_ ,A_ ):
return Version(A_ )
elif isinstance(A_ ,A_ ):
return other
raise TypeError(F'{other} (type {type(A_ )}) cannot be compared to version.' )
def __eq__( self : List[Any] ,A_ : Dict ) -> Any:
try:
A = self._validate_operand(A_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : List[Any] ,A_ : Optional[int] ) -> Tuple:
A = self._validate_operand(A_ )
return self.tuple < other.tuple
def __hash__( self : Union[str, Any] ) -> Union[str, Any]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,A_ : List[str] ) -> List[str]:
A = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.version_str
def _snake_case ( snake_case__ : List[str] ):
A = _VERSION_REG.match(snake_case__ )
if not res:
raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
return tuple(int(snake_case__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def _snake_case ( snake_case__ : str ):
return ".".join(str(snake_case__ ) for v in version_tuple ) | 74 | 0 |
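The dataclass above exists mainly so that versions compare numerically rather than lexicographically. A minimal sketch of the parse-and-compare idea, independent of the dataclass machinery (the regex is taken from the row; the function name is mine):

import re

_VERSION_REG = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")


def str_to_version_tuple(version_str: str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(res.group(g)) for g in ("major", "minor", "patch"))


# Tuple comparison gets "1.10.0" > "1.2.3" right, where string comparison would not.
assert str_to_version_tuple("1.10.0") > str_to_version_tuple("1.2.3")
assert "1.10.0" < "1.2.3"  # lexicographic comparison is wrong here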
"""simple docstring"""
def lowerCAmelCase_ ( snake_case__ : float , snake_case__ : float ) -> Optional[Any]:
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(snake_case__ ) * abs(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 126 |
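Cleaned up, the function above is simply KE = 0.5 * m * v**2, with `abs()` making the result independent of the velocity's sign. A readable sketch, with names of my choosing:

def kinetic_energy(mass: float, velocity: float) -> float:
    # KE = 0.5 * m * v**2; abs() makes the result direction-independent.
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


print(kinetic_energy(10.0, 10.0))  # 500.0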
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_lowercase = NewType('''DataClass''', Any)
_lowercase = NewType('''DataClassType''', Any)
def _snake_case ( snake_case__ : Tuple ):
if isinstance(snake_case__ , snake_case__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def _snake_case ( snake_case__ : list ):
A = {str(snake_case__ ): choice for choice in choices}
return lambda snake_case__ : str_to_choice.get(snake_case__ , snake_case__ )
def _snake_case ( *,
snake_case__ : Union[str, List[str]] = None , snake_case__ : str = None , snake_case__ : Any = dataclasses.MISSING , snake_case__ : Callable[[], Any] = dataclasses.MISSING , snake_case__ : dict = None , **snake_case__ : Any , ):
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
A = {}
if aliases is not None:
A = aliases
if help is not None:
A = help
return dataclasses.field(metadata=snake_case__ , default=snake_case__ , default_factory=snake_case__ , **snake_case__ )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Iterable[DataClassType]
def __init__( self : List[str] ,A_ : Union[DataClassType, Iterable[DataClassType]] ,**A_ : Any ) -> Optional[int]:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
A = ArgumentDefaultsHelpFormatter
super().__init__(**A_ )
if dataclasses.is_dataclass(A_ ):
A = [dataclass_types]
A = list(A_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(A_ )
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ,A_ : dataclasses.Field ) -> Optional[Any]:
A = F'--{field.name}'
A = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type ,A_ ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
A = kwargs.pop('aliases' ,[] )
if isinstance(A_ ,A_ ):
A = [aliases]
A = getattr(field.type ,'__origin__' ,field.type )
if origin_type is Union or (hasattr(A_ ,'UnionType' ) and isinstance(A_ ,types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(A_ ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F' Problem encountered in field \'{field.name}\'.' )
if type(A_ ) not in field.type.__args__:
# filter `str` in Union
A = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
A = getattr(field.type ,'__origin__' ,field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
A = (
field.type.__args__[0] if isinstance(A_ ,field.type.__args__[1] ) else field.type.__args__[1]
)
A = getattr(field.type ,'__origin__' ,field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
A = {}
if origin_type is Literal or (isinstance(field.type ,A_ ) and issubclass(field.type ,A_ )):
if origin_type is Literal:
A = field.type.__args__
else:
A = [x.value for x in field.type]
A = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
A = field.default
else:
A = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
A = copy(A_ )
# Hack because type=bool in argparse does not behave as we want.
A = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # The default is False when a bool field provides no default; otherwise use the declared default.
A = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
A = default
# This tells argparse we accept 0 or 1 value after --field_name
A = '?'
# This is the value that will get picked if we do --field_name (without value)
A = True
elif isclass(A_ ) and issubclass(A_ ,A_ ):
A = field.type.__args__[0]
A = '+'
if field.default_factory is not dataclasses.MISSING:
A = field.default_factory()
elif field.default is dataclasses.MISSING:
A = True
else:
A = field.type
if field.default is not dataclasses.MISSING:
A = field.default
elif field.default_factory is not dataclasses.MISSING:
A = field.default_factory()
else:
A = True
parser.add_argument(A_ ,*A_ ,**A_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
A = False
parser.add_argument(F'--no_{field.name}' ,action='store_false' ,dest=field.name ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : DataClassType ) -> List[Any]:
if hasattr(A_ ,'_argument_group_name' ):
A = self.add_argument_group(dtype._argument_group_name )
else:
A = self
try:
A = get_type_hints(A_ )
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(A_ ):
A = '.'.join(map(A_ ,sys.version_info[:3] ) )
raise RuntimeError(
F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(A_ ):
if not field.init:
continue
A = type_hints[field.name]
self._parse_dataclass_field(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Any=None ,A_ : int=False ,A_ : Any=True ,A_ : List[str]=None ,A_ : Union[str, Any]=None ,) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
A = []
if args_filename:
args_files.append(Path(A_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
A = ArgumentParser()
args_file_parser.add_argument(A_ ,type=A_ ,action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
A , A = args_file_parser.parse_known_args(args=A_ )
A = vars(A_ ).get(args_file_flag.lstrip('-' ) ,A_ )
if cmd_args_file_paths:
args_files.extend([Path(A_ ) for p in cmd_args_file_paths] )
A = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
A = file_args + args if args is not None else file_args + sys.argv[1:]
A , A = self.parse_known_args(args=A_ )
A = []
for dtype in self.dataclass_types:
A = {f.name for f in dataclasses.fields(A_ ) if f.init}
A = {k: v for k, v in vars(A_ ).items() if k in keys}
for k in keys:
delattr(A_ ,A_ )
A = dtype(**A_ )
outputs.append(A_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(A_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
return (*outputs,)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Dict[str, Any] ,A_ : bool = False ) -> Tuple[DataClass, ...]:
A = set(args.keys() )
A = []
for dtype in self.dataclass_types:
A = {f.name for f in dataclasses.fields(A_ ) if f.init}
A = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
A = dtype(**A_ )
outputs.append(A_ )
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(A_ )}' )
return tuple(A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]:
with open(Path(A_ ) ,encoding='utf-8' ) as open_json_file:
A = json.loads(open_json_file.read() )
A = self.parse_dict(A_ ,allow_extra_keys=A_ )
return tuple(A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : bool = False ) -> Tuple[DataClass, ...]:
A = self.parse_dict(yaml.safe_load(Path(A_ ).read_text() ) ,allow_extra_keys=A_ )
return tuple(A_ ) | 74 | 0 |
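A minimal usage sketch of the parser defined above, assuming it is imported as transformers' public `HfArgumentParser` (which this source corresponds to): each dataclass field becomes a `--flag`, and `parse_args_into_dataclasses` returns one instance per dataclass. The `TrainingConfig` dataclass here is a made-up example.

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class TrainingConfig:
    model_name: str = field(default="bert-base-cased", metadata={"help": "Model to fine-tune."})
    learning_rate: float = 2e-5
    fp16: bool = True  # for a bool defaulting to True, a --no_fp16 complement flag is generated


parser = HfArgumentParser(TrainingConfig)
(config,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-5", "--no_fp16"])
print(config)  # TrainingConfig(model_name='bert-base-cased', learning_rate=3e-05, fp16=False)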
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __magic_name__ ( _lowercase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = '''roformer'''
def __init__( self, lowercase_=50000, lowercase_=None, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=1536, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=0, lowercase_=False, lowercase_=True, **lowercase_, ) -> Dict:
"""simple docstring"""
super().__init__(pad_token_id=A_, **A_ )
a__ =vocab_size
a__ =hidden_size if embedding_size is None else embedding_size
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =hidden_act
a__ =intermediate_size
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =max_position_embeddings
a__ =type_vocab_size
a__ =initializer_range
a__ =layer_norm_eps
a__ =rotary_value
a__ =use_cache
class __magic_name__ ( _lowercase ):
'''simple docstring'''
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
a__ ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
a__ ={0: '''batch''', 1: '''sequence'''}
a__ ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 188 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase = 16
_lowercase = 32
def _snake_case ( snake_case__ : Accelerator , snake_case__ : int = 16 , snake_case__ : str = "bert-base-cased" ):
A = AutoTokenizer.from_pretrained(snake_case__ )
A = load_dataset('glue' , 'mrpc' )
def tokenize_function(snake_case__ : Dict ):
# max_length=None => use the model max length (it's actually the default)
A = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=snake_case__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case__ : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(snake_case__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
A = DataLoader(
tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
A = DataLoader(
tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Optional[int] ):
# Initialize accelerator
A = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A = config['lr']
A = int(config['num_epochs'] )
A = int(config['seed'] )
A = int(config['batch_size'] )
A = args.model_name_or_path
set_seed(snake_case__ )
A , A = get_dataloaders(snake_case__ , snake_case__ , snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ )
# Instantiate optimizer
A = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A = optimizer_cls(params=model.parameters() , lr=snake_case__ )
if accelerator.state.deepspeed_plugin is not None:
A = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
A = 1
A = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , )
else:
A = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A , A , A , A , A = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# We need to keep track of how many total steps we have iterated over
A = 0
# We also need to keep track of the stating epoch so files are named properly
A = 0
# Now we train the model
A = evaluate.load('glue' , 'mrpc' )
A = 0
A = {}
for epoch in range(snake_case__ , snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
A = model(**snake_case__ )
A = outputs.loss
A = loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
A = 0
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A = model(**snake_case__ )
A = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
A , A = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case__ ) - 1:
A = predictions[: len(eval_dataloader.dataset ) - samples_seen]
A = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
A = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , snake_case__ )
A = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
A = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(snake_case__ , snake_case__ )
def _snake_case ( ):
A = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=snake_case__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=snake_case__ , )
parser.add_argument(
'--output_dir' , type=snake_case__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=snake_case__ , default=snake_case__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=snake_case__ , default=3 , help='Number of train epochs.' , )
A = parser.parse_args()
A = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main() | 74 | 0 |
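The script above follows the standard Accelerate recipe. Reduced to its skeleton, with a toy model and random tensors standing in for BERT and GLUE, the core pattern is:

import torch
from accelerate import Accelerator

accelerator = Accelerator()

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
dataset = torch.utils.data.TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,)))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=16)

# prepare() wraps everything for the current device / distributed setup.
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, labels in dataloader:
    loss = torch.nn.functional.cross_entropy(model(inputs), labels)  # batch is already on device
    accelerator.backward(loss)  # replaces loss.backward()
    optimizer.step()
    optimizer.zero_grad()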
'''simple docstring'''
import math
import random
def a__ ( a__ , a__ = False ):
"""simple docstring"""
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
UpperCAmelCase : Dict = 0.0_2
def a__ ( a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = float(2 * (random.randint(1 , 1_00 )) - 1 )
for _ in range(snake_case__ ):
# Forward propagation
__SCREAMING_SNAKE_CASE = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
__SCREAMING_SNAKE_CASE = (expected / 1_00) - layer_a
# Error delta
__SCREAMING_SNAKE_CASE = layer_1_error * sigmoid_function(snake_case__ , snake_case__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_00
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase : Optional[Any] = int(input('Expected value: '))
UpperCAmelCase : List[Any] = int(input('Number of propagations: '))
print(forward_propagation(expected, number_propagations))
| 267 |
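Readable names make the row above easier to follow: it trains a single weight by gradient descent so that sigmoid(0.02 * weight) approaches expected / 100. In this de-obfuscated sketch the derivative call is reconstructed as `sigmoid_function(layer_1, deriv=True)` (the obfuscation collapsed both arguments into `snake_case__`), and at least one propagation is assumed.

import math
import random

INITIAL_VALUE = 0.02


def sigmoid_function(value: float, deriv: bool = False) -> float:
    if deriv:  # `value` is assumed to already be a sigmoid output here
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


def forward_propagation(expected: int, number_propagations: int) -> float:
    weight = float(2 * random.randint(1, 100) - 1)  # random initial weight
    for _ in range(number_propagations):
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)   # forward pass
        layer_1_error = (expected / 100) - layer_1           # target minus prediction
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, deriv=True)
        weight += INITIAL_VALUE * layer_1_delta              # gradient step
    return layer_1 * 100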
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : str ,A_ : Dict=13 ,A_ : str=7 ,A_ : str=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=False ,A_ : str=False ,A_ : Tuple=False ,A_ : str=2 ,A_ : Optional[int]=99 ,A_ : Union[str, Any]=0 ,A_ : Optional[Any]=32 ,A_ : Optional[int]=5 ,A_ : Optional[int]=4 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=0.1 ,A_ : Union[str, Any]=512 ,A_ : Union[str, Any]=2 ,A_ : Any=0.02 ,A_ : List[str]=2 ,A_ : int=4 ,A_ : int="last" ,A_ : Dict=True ,A_ : Union[str, Any]=None ,A_ : Any=0 ,) -> List[Any]:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_lengths
A = use_token_type_ids
A = use_labels
A = gelu_activation
A = sinusoidal_embeddings
A = causal
A = asm
A = n_langs
A = vocab_size
A = n_special
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = summary_type
A = use_proj
A = scope
A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_input_lengths:
A = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,2 ).float()
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ,A_ : int ,A_ : Dict ,A_ : str ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Union[str, Any] ,A_ : int ,A_ : str ,) -> Any:
A = XLMModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,lengths=A_ ,langs=A_ )
A = model(A_ ,langs=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Any ,A_ : str ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : str ,A_ : Any ,A_ : str ,A_ : Dict ,) -> Dict:
A = XLMWithLMHeadModel(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[str] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Any ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[Any] ,) -> int:
A = XLMForQuestionAnsweringSimple(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
A = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Optional[int] ,A_ : Any ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Tuple ,A_ : List[str] ,A_ : Optional[int] ,) -> List[Any]:
A = XLMForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,)
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,)
((A) , ) = result_with_labels.to_tuple()
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
((A) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : int ,A_ : Optional[int] ,A_ : List[str] ,A_ : str ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ,) -> Optional[int]:
A = XLMForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ,A_ : str ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : Optional[int] ,) -> List[str]:
A = self.num_labels
A = XLMForTokenClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : Union[str, Any] ,A_ : Dict ,A_ : List[Any] ,) -> List[str]:
A = self.num_choices
A = XLMForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = model(
A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
A = self.prepare_config_and_inputs()
        ( A , A , A , A , A , A , A , A , A , ) = config_and_inputs
A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase: str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowerCamelCase: Optional[int] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ) -> Any:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with the slower tokenizers
return True
return False
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Optional[int] ,A_ : List[Any]=False ) -> int:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = XLMModelTester(self )
A = ConfigTester(self ,config_class=A_ ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : Any ,A_ : str ,A_ : Tuple ,A_ : Any ,A_ : Any=False ,A_ : Any=1 ) -> List[Any]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) )
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = min_length + idx + 1
A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : str ,A_ : Optional[int] ,A_ : int ,A_ : Any ,A_ : str=False ,A_ : Any=1 ) -> Tuple:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,)
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,)
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = XLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(A_ )
A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president
A = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A = model.generate(A_ ,do_sample=A_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ ) | 74 | 0 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_lowerCamelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class a_ ( unittest.TestCase , _lowercase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =load_tool('text-question-answering' )
self.tool.setup()
SCREAMING_SNAKE_CASE =load_tool('text-question-answering' ,remote=A_ )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =self.tool(A_ ,'What did Hugging Face do in April 2021?' )
self.assertEqual(A_ ,'launched the BigScience Research Workshop' )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.remote_tool(A_ ,'What did Hugging Face do in April 2021?' )
self.assertEqual(A_ ,'launched the BigScience Research Workshop' )
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.tool(text=A_ ,question='What did Hugging Face do in April 2021?' )
self.assertEqual(A_ ,'launched the BigScience Research Workshop' )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.remote_tool(text=A_ ,question='What did Hugging Face do in April 2021?' )
self.assertEqual(A_ ,'launched the BigScience Research Workshop' )
| 334 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowercase = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self : int ,**A_ : Any ) -> Any:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A = deprecated_arg[3:]
A = not kwargs.pop(A_ )
logger.warning(
F'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'
F' {positive_arg}={kwargs[positive_arg]}' )
A = kwargs.pop('tpu_name' ,self.tpu_name )
A = kwargs.pop('device_idx' ,self.device_idx )
A = kwargs.pop('eager_mode' ,self.eager_mode )
A = kwargs.pop('use_xla' ,self.use_xla )
super().__init__(**A_ )
_lowerCamelCase: str = field(
default=_lowercase , metadata={'''help''': '''Name of TPU'''} , )
_lowerCamelCase: int = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
_lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Benchmark models in eager model.'''} )
_lowerCamelCase: bool = field(
default=_lowercase , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self ,['tf'] )
A = None
if self.tpu:
try:
if self.tpu_name:
A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
A = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
A = None
return tpu
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self ,['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
A = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'GPU' )
A = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([] ,'GPU' ) # disable GPU
A = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}' )
return strategy
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool:
requires_backends(self ,['tf'] )
return self._setup_tpu is not None
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> "tf.distribute.Strategy":
requires_backends(self ,['tf'] )
return self._setup_strategy
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
requires_backends(self ,['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
requires_backends(self ,['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> bool:
return self.n_gpu > 0 | 74 | 0 |
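# A condensed sketch of the device-selection logic in the benchmark arguments
# above: prefer a TPU strategy when a cluster resolver can be built, otherwise
# pin a single GPU, otherwise fall back to CPU. The function name is
# illustrative, not part of the transformers API.
import tensorflow as tf

def build_strategy(device_idx: int = 0) -> tf.distribute.Strategy:
    try:
        tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
        tf.config.experimental_connect_to_cluster(tpu)
        tf.tpu.experimental.initialize_tpu_system(tpu)
        return tf.distribute.TPUStrategy(tpu)
    except ValueError:
        pass  # no TPU reachable
    gpus = tf.config.list_physical_devices("GPU")
    if gpus:
        tf.config.set_visible_devices(gpus[device_idx], "GPU")  # single-GPU only
        return tf.distribute.OneDeviceStrategy(device=f"/gpu:{device_idx}")
    tf.config.set_visible_devices([], "GPU")  # hide GPUs, run on CPU
    return tf.distribute.OneDeviceStrategy(device="/cpu:0")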
"""simple docstring"""
import argparse
import struct
import unittest
class UpperCamelCase__:
def __init__( self ,__UpperCAmelCase ) -> None:
A__ = data
# Initialize hash values
A__ = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
A__ = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
A__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def snake_case__ ( __UpperCAmelCase ) -> bytes:
A__ = b'\x80' + (b'\x00' * (63 - (len(A_ ) + 8) % 64))
A__ = struct.pack('>Q' ,(len(A_ ) * 8) )
return data + padding + big_endian_integer
def snake_case__ ( self ) -> None:
# Convert into blocks of 64 bytes
A__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A__ = list(struct.unpack('>16L' ,A_ ) )
# add 48 0-ed integers
words += [0] * 48
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
A__ = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
A__ = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
A__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X100_000_000
# Compression
A__ = self.ror(A_ ,6 ) ^ self.ror(A_ ,11 ) ^ self.ror(A_ ,25 )
A__ = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
A__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100_000_000
A__ = self.ror(A_ ,2 ) ^ self.ror(A_ ,13 ) ^ self.ror(A_ ,22 )
A__ = (a & b) ^ (a & c) ^ (b & c)
A__ = (sa + maj) % 0X100_000_000
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = (
g,
f,
e,
((d + tempa) % 0X100_000_000),
c,
b,
a,
((tempa + tempa) % 0X100_000_000),
)
A__ = [a, b, c, d, e, f, g, h]
# Modify final values
A__ = [
((element + mutated_hash_values[index]) % 0X100_000_000)
for index, element in enumerate(self.hashes )
]
A__ = ''.join([hex(A_ )[2:].zfill(8 ) for value in self.hashes] )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> int:
return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> None:
import hashlib
A__ = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(A_ ).hash ,hashlib.shaaaa(A_ ).hexdigest() )
def UpperCAmelCase ( ):
"""simple docstring"""
import doctest
doctest.testmod()
A__ = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
A__ = parser.parse_args()
A__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
A__ = f.read()
else:
A__ = bytes(snake_case__ , 'utf-8' )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
main()
| 221 |
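# Two checkable pieces of the SHA-256 sample above: the 32-bit rotate-right
# helper, and a reference digest from hashlib. Note that in the SHA-256 spec
# the new working value `a` is (temp1 + temp2) mod 2**32; the obfuscated names
# in the sample collapse both temporaries into a single identifier.
import hashlib

def ror32(value: int, rotations: int) -> int:
    """Rotate a 32-bit word right by `rotations` bits."""
    return 0xFFFFFFFF & ((value << (32 - rotations)) | (value >> rotations))

assert ror32(0x00000001, 1) == 0x80000000
print(hashlib.sha256(b"Test String").hexdigest())  # what the unit test compares against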
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''dpt'''
def __init__( self : str ,A_ : Tuple=768 ,A_ : int=12 ,A_ : Optional[int]=12 ,A_ : Optional[int]=3072 ,A_ : List[str]="gelu" ,A_ : str=0.0 ,A_ : int=0.0 ,A_ : str=0.02 ,A_ : str=1e-12 ,A_ : str=384 ,A_ : Dict=16 ,A_ : Union[str, Any]=3 ,A_ : Dict=False ,A_ : Any=True ,A_ : Optional[int]=[2, 5, 8, 11] ,A_ : Optional[Any]="project" ,A_ : Tuple=[4, 2, 1, 0.5] ,A_ : int=[96, 192, 384, 768] ,A_ : int=256 ,A_ : str=-1 ,A_ : Optional[int]=False ,A_ : Optional[int]=True ,A_ : Union[str, Any]=0.4 ,A_ : Union[str, Any]=255 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=[1, 1024, 24, 24] ,A_ : List[str]=[0, 1] ,A_ : List[Any]=None ,**A_ : Tuple ,) -> Union[str, Any]:
super().__init__(**A_ )
A = hidden_size
A = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
A = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
A = BitConfig(**A_ )
elif isinstance(A_ ,A_ ):
logger.info('Initializing the config with a `BiT` backbone.' )
A = BitConfig(**A_ )
elif isinstance(A_ ,A_ ):
A = backbone_config
else:
raise ValueError(
F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
A = backbone_featmap_shape
A = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
A = None
A = None
A = []
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
A = readout_type
A = reassemble_factors
A = neck_hidden_sizes
A = fusion_hidden_size
A = head_in_index
A = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
A = use_auxiliary_head
A = auxiliary_loss_weight
A = semantic_loss_ignore_index
A = semantic_classifier_dropout
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
A = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
A = self.backbone_config.to_dict()
A = self.__class__.model_type
return output | 74 | 0 |
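# A hedged usage sketch for the hybrid DPT configuration above, assuming a
# transformers release that ships DPTConfig. With backbone_config left as
# None, the class falls back to the default BiT backbone shown in the code.
from transformers import DPTConfig

config = DPTConfig(is_hybrid=True)
print(type(config.backbone_config).__name__)  # BitConfig
print(config.readout_type)                    # 'project' (required in hybrid mode)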
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class _UpperCAmelCase ( _lowercase, _lowercase ):
'''simple docstring'''
lowerCamelCase__ ='''dinat'''
lowerCamelCase__ ={
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__(self , a_=4 , a_=3 , a_=64 , a_=[3, 4, 6, 5] , a_=[2, 4, 8, 16] , a_=7 , a_=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , a_=3.0 , a_=True , a_=0.0 , a_=0.0 , a_=0.1 , a_="gelu" , a_=0.02 , a_=1E-5 , a_=0.0 , a_=None , a_=None , **a_ , ):
'''simple docstring'''
super().__init__(**A_ )
__snake_case : Dict = patch_size
__snake_case : List[str] = num_channels
__snake_case : List[str] = embed_dim
__snake_case : Dict = depths
__snake_case : Union[str, Any] = len(A_ )
__snake_case : Optional[int] = num_heads
__snake_case : List[str] = kernel_size
__snake_case : List[Any] = dilations
__snake_case : Optional[int] = mlp_ratio
__snake_case : Optional[Any] = qkv_bias
__snake_case : List[Any] = hidden_dropout_prob
__snake_case : Union[str, Any] = attention_probs_dropout_prob
__snake_case : str = drop_path_rate
__snake_case : Dict = hidden_act
__snake_case : Union[str, Any] = layer_norm_eps
__snake_case : Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case : Optional[Any] = int(embed_dim * 2 ** (len(A_ ) - 1) )
__snake_case : str = layer_scale_init_value
__snake_case : Union[str, Any] = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(A_ ) + 1 )]
__snake_case , __snake_case : str = get_aligned_output_features_output_indices(
out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
| 102 |
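# The stage bookkeeping in the Dinat config above, reduced to plain
# arithmetic: the channel dimension doubles per stage, and the stage names
# are 'stem' plus one entry per depth.
embed_dim, depths = 64, [3, 4, 6, 5]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
print(hidden_size)   # 512
print(stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']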
"""simple docstring"""
from __future__ import annotations
import math
_lowercase = '''2020.9.26'''
_lowercase = '''xcodz-dot, cclaus, dhruvmanila'''
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float ):
if not all(isinstance(snake_case__ , (float, int) ) for val in locals().values() ):
A = F'Input values must either be float or int: {list(locals().values() )}'
raise TypeError(snake_case__ )
A = ((x * distance) / (z + distance)) * scale
A = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : str , snake_case__ : float ):
if not isinstance(snake_case__ , snake_case__ ):
raise TypeError('Axis must be a str' )
A = locals()
del input_variables["axis"]
if not all(isinstance(snake_case__ , (float, int) ) for val in input_variables.values() ):
A = (
'Input values except axis must either be float or int: '
F'{list(input_variables.values() )}'
)
raise TypeError(snake_case__ )
A = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
A = x * math.cos(snake_case__ ) - y * math.sin(snake_case__ )
A = y * math.cos(snake_case__ ) + x * math.sin(snake_case__ )
A = z
elif axis == "x":
A = y * math.cos(snake_case__ ) - z * math.sin(snake_case__ )
A = z * math.cos(snake_case__ ) + y * math.sin(snake_case__ )
A = x
elif axis == "y":
A = x * math.cos(snake_case__ ) - z * math.sin(snake_case__ )
A = z * math.cos(snake_case__ ) + x * math.sin(snake_case__ )
A = y
else:
raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""") | 74 | 0 |
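# A de-obfuscated sketch of the perspective projection above. The sample's
# rotation uses an unusual angle normalization ((angle % 360) / 450 * 180 / pi);
# this sketch leaves that quirk out and uses plain degrees-to-radians instead.
import math

def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    return (x * distance / (z + distance)) * scale, (y * distance / (z + distance)) * scale

def rotate_z(x: float, y: float, z: float, angle_deg: float) -> tuple[float, float, float]:
    a = math.radians(angle_deg)
    return x * math.cos(a) - y * math.sin(a), y * math.cos(a) + x * math.sin(a), z

print(convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0))  # (~7.692, ~15.385)
print(rotate_z(1.0, 2.0, 3.0, 90.0))             # (~-2.0, ~1.0, 3.0)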
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
__UpperCamelCase :int = f"""Input value of [number={number}] must be an integer"""
raise TypeError(snake_case__ )
if number < 0:
return False
__UpperCamelCase :Optional[int] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
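# The function above tests for automorphic numbers: n is automorphic when
# n**2 ends in the digits of n (5 -> 25, 76 -> 5776). A clean version:
def is_automorphic(number: int) -> bool:
    if number < 0:
        return False
    square = number * number
    while number > 0:
        if number % 10 != square % 10:
            return False
        number //= 10
        square //= 10
    return True

print([n for n in range(100) if is_automorphic(n)])  # [0, 1, 5, 6, 25, 76]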
"""simple docstring"""
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : int ,A_ : int ) -> Union[str, Any]:
A = n
A = [None] * self.n
A = 0 # index of the first element
A = 0
A = 0
def __len__( self : int ) -> int:
return self.size
def _SCREAMING_SNAKE_CASE ( self : Any ) -> bool:
return self.size == 0
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
return False if self.is_empty() else self.array[self.front]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ) -> int:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
A = data
A = (self.rear + 1) % self.n
self.size += 1
return self
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
if self.size == 0:
raise Exception('UNDERFLOW' )
A = self.array[self.front]
A = None
A = (self.front + 1) % self.n
self.size -= 1
return temp | 74 | 0 |
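# For comparison with the fixed-size circular queue above: the standard
# library's bounded deque covers the same FIFO use case, though it evicts the
# oldest element on overflow instead of raising 'QUEUE IS FULL'.
from collections import deque

q = deque(maxlen=3)
q.append(1); q.append(2); q.append(3)
q.append(4)            # silently evicts 1
print(q.popleft())     # 2
print(list(q))         # [3, 4]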
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCamelCase : int = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def _a ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def _a ( SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case__ )
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
UpperCamelCase__ : Dict = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(snake_case__ , id=snake_case__ )
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
if exitstatus == 5:
UpperCamelCase__ : Dict = 0
# Doctest custom flag to ignore output.
__UpperCamelCase : Optional[Any] = doctest.register_optionflag("IGNORE_RESULT")
__UpperCamelCase : List[str] = doctest.OutputChecker
class __magic_name__ ( _lowercase):
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str ) -> Any:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , A_ , A_ , A_ )
__UpperCamelCase : Optional[Any] = CustomOutputChecker
__UpperCamelCase : int = HfDoctestModule
__UpperCamelCase : str = HfDocTestParser
| 146 |
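# The IGNORE_RESULT machinery above, as a self-contained sketch: register a
# custom doctest option flag and subclass OutputChecker so flagged examples
# pass regardless of their printed output.
import doctest

IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

class IgnoreResultChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True  # accept any output when the flag is set
        return super().check_output(want, got, optionflags)

doctest.OutputChecker = IgnoreResultChecker  # installed globally, as conftest.py does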
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*A_ : List[str] ,**A_ : int ) -> None:
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' ,A_ ,)
super().__init__(*A_ ,**A_ ) | 74 | 0 |
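# The generic shape of the deprecation shim above: warn on construction, then
# delegate everything to the replacement class. Names here are illustrative,
# not the transformers API.
import warnings

class NewImageProcessor:
    def __init__(self, *args, **kwargs):
        self.config = kwargs

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor(size=224)  # emits a FutureWarning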
'''simple docstring'''
from typing import List
import numpy as np
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : dict ):
'''simple docstring'''
UpperCAmelCase__ = {key: len(snake_case__ ) for key, value in gen_kwargs.items() if isinstance(snake_case__ , snake_case__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"""Sharding is ambiguous for this dataset: """
+ """we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n"""
+ """\n""".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ """\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, """
+ """and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."""
) )
UpperCAmelCase__ = max(lists_lengths.values() , default=0 )
return max(1 , snake_case__ )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
UpperCAmelCase__ = []
for group_idx in range(snake_case__ ):
UpperCAmelCase__ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
UpperCAmelCase__ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
UpperCAmelCase__ = range(snake_case__ , start + num_shards_to_add )
shards_indices_per_group.append(snake_case__ )
return shards_indices_per_group
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
UpperCAmelCase__ = _number_of_shards_in_gen_kwargs(snake_case__ )
if num_shards == 1:
return [dict(snake_case__ )]
else:
UpperCAmelCase__ = _distribute_shards(num_shards=snake_case__ , max_num_jobs=snake_case__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(snake_case__ , snake_case__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(snake_case__ ) )
]
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[dict] ):
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , snake_case__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : np.random.Generator , SCREAMING_SNAKE_CASE__ : dict ):
'''simple docstring'''
UpperCAmelCase__ = {len(snake_case__ ) for value in gen_kwargs.values() if isinstance(snake_case__ , snake_case__ )}
UpperCAmelCase__ = {}
for size in list_sizes:
UpperCAmelCase__ = list(range(snake_case__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
UpperCAmelCase__ = dict(snake_case__ )
for key, value in shuffled_kwargs.items():
if isinstance(snake_case__ , snake_case__ ):
UpperCAmelCase__ = [value[i] for i in indices_per_size[len(snake_case__ )]]
return shuffled_kwargs
| 346 |
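# The shard-distribution helper above, de-obfuscated: split num_shards into
# contiguous index ranges across at most max_num_jobs groups, giving the
# first (num_shards % max_num_jobs) groups one extra shard.
def distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]:
    groups, start = [], 0
    for group_idx in range(max_num_jobs):
        num_to_add = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if num_to_add == 0:
            break
        groups.append(range(start, start + num_to_add))
        start += num_to_add
    return groups

print(distribute_shards(10, 3))  # [range(0, 4), range(4, 7), range(7, 10)]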
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''gpt_bigcode'''
_lowerCamelCase: List[Any] = ['''past_key_values''']
_lowerCamelCase: int = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[int] ,A_ : Dict=5_0257 ,A_ : Union[str, Any]=1024 ,A_ : str=768 ,A_ : Any=12 ,A_ : Any=12 ,A_ : Optional[int]=None ,A_ : Any="gelu_pytorch_tanh" ,A_ : List[str]=0.1 ,A_ : Optional[int]=0.1 ,A_ : List[str]=0.1 ,A_ : Tuple=1e-5 ,A_ : Optional[int]=0.02 ,A_ : List[str]=True ,A_ : Optional[Any]=True ,A_ : List[Any]=5_0256 ,A_ : Union[str, Any]=5_0256 ,A_ : int=True ,A_ : Optional[Any]=True ,A_ : Dict=True ,**A_ : Union[str, Any] ,) -> Union[str, Any]:
A = vocab_size
A = n_positions
A = n_embd
A = n_layer
A = n_head
A = n_inner
A = activation_function
A = resid_pdrop
A = embd_pdrop
A = attn_pdrop
A = layer_norm_epsilon
A = initializer_range
A = scale_attn_weights
A = use_cache
A = attention_softmax_in_fpaa
A = scale_attention_softmax_in_fpaa
A = multi_query
A = bos_token_id
A = eos_token_id
super().__init__(bos_token_id=A_ ,eos_token_id=A_ ,**A_ ) | 74 | 0 |
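# A hedged usage sketch for the config above, assuming a transformers release
# that ships GPTBigCodeConfig: the defaults mirror the santacoder-style setup,
# including multi-query attention.
from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig()
print(config.multi_query)             # True
print(config.n_embd, config.n_head)   # 768 12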
"""simple docstring"""
from string import ascii_uppercase
lowercase_ = {char: i for i, char in enumerate(ascii_uppercase)}
lowercase_ = dict(enumerate(ascii_uppercase))
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = len(snake_case__ )
__A = 0
while True:
if x == i:
__A = 0
if len(snake_case__ ) == len(snake_case__ ):
break
key += key[i]
i += 1
return key
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = ''''''
__A = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__A = (dicta[letter] - dicta[key_new[i]]) % 2_6
i += 1
cipher_text += dicta[x]
return cipher_text
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = ''''''
__A = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__A = (dicta[letter] + dicta[key_new[i]] + 2_6) % 2_6
i += 1
or_txt += dicta[x]
return or_txt
def lowerCAmelCase ( ):
"""simple docstring"""
__A = '''THE GERMAN ATTACK'''
__A = '''SECRET'''
__A = generate_key(snake_case__ , snake_case__ )
__A = cipher_text(snake_case__ , snake_case__ )
print(f'Encrypted Text = {s}' )
print(f'Original Text = {original_text(snake_case__ , snake_case__ )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 266 |
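# A compact, runnable version of the Vigenère-style scheme above: the key is
# repeated along the message, spaces pass through, and each letter is shifted
# by the matching key letter. Note the sample subtracts on encryption and
# adds on decryption; the two directions are inverses either way.
from string import ascii_uppercase

IDX = {c: i for i, c in enumerate(ascii_uppercase)}

def shift_text(text: str, key: str, sign: int) -> str:
    out, k = [], 0
    for ch in text:
        if ch == " ":
            out.append(" ")
            continue
        out.append(ascii_uppercase[(IDX[ch] + sign * IDX[key[k % len(key)]]) % 26])
        k += 1
    return "".join(out)

cipher = shift_text("THE GERMAN ATTACK", "SECRET", -1)  # encrypt (subtract)
print(cipher)
print(shift_text(cipher, "SECRET", +1))                 # decrypt -> THE GERMAN ATTACK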
"""simple docstring"""
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_lowercase = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
_lowercase = '''sshleifer/student_marian_en_ro_6_1'''
_lowercase = '''sshleifer/tiny-mbart'''
@require_torch
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any]=False ,A_ : Optional[int]=None ,A_ : List[str]=True ,A_ : Tuple=True ,A_ : Union[str, Any]=True ,A_ : List[str]=True ,) -> Tuple:
A = self.run_trainer(
eval_steps=1 ,max_len=12 ,model_name=A_ ,num_train_epochs=1 ,distributed=A_ ,extra_args_str=A_ ,predict_with_generate=A_ ,do_train=A_ ,do_eval=A_ ,do_predict=A_ ,)
A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history
if not do_eval:
return
A = [log for log in logs if 'eval_loss' in log.keys()]
A = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
A = eval_metrics[-1]
assert isinstance(last_step_stats['eval_bleu'] ,A_ )
assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
self.run_seqaseq_quick(distributed=A_ )
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
self.run_seqaseq_quick(distributed=A_ )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple --fp16' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2' ,predict_with_generate=A_ )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
self.run_seqaseq_quick(
distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2 --fp16' ,predict_with_generate=A_ )
@require_apex
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' )
@parameterized.expand(['base', 'low', 'high', 'mixed'] )
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Dict ) -> List[str]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
A = {
# test with the default log_level - should be info and thus log info once
'base': {'extra_args_str': '', 'n_matches': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
}
A = experiments[experiment_id]
A = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
A = 'Running training'
with CaptureStderr() as cl:
self.run_seqaseq_quick(**A_ ,extra_args_str=data['extra_args_str'] )
A = len(re.findall(A_ ,cl.err ) )
self.assertEqual(A_ ,data['n_matches'] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
A = self.run_trainer(
eval_steps=2 ,max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=10 ,distributed=A_ ,)
# Check metrics
A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history
A = [log for log in logs if 'eval_loss' in log.keys()]
A = eval_metrics[0]
A = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['eval_bleu'] ,A_ )
# test if do_predict saves generations and metrics
A = os.listdir(A_ )
A = {os.path.basename(A_ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(A_ : str ) -> Tuple[int, float]:
A = '--skip_memory_metrics 0'
A = self.run_trainer(
max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=1 ,optim=A_ ,distributed=A_ ,extra_args_str=A_ ,do_eval=A_ ,do_predict=A_ ,n_gpus_to_use=1 ,)
# Check metrics
A = TrainerState.load_from_json(Path(A_ ,'trainer_state.json' ) ).log_history
A = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 )
A = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 )
A = logs[0]['train_loss']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
A , A , A = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
A , A , A = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
A = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
A = gpu_peak_mem_orig + gpu_alloc_mem_orig
A = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
A = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
A = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
A_ ,A_ ,'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' ,)
self.assertGreater(
A_ ,A_ ,'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' ,)
self.assertEqual(
A_ ,A_ ,F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ,A_ : str ,A_ : int ,A_ : float = 3e-3 ,A_ : str = "adafactor" ,A_ : bool = False ,A_ : str = None ,A_ : int = 0 ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : int = None ,) -> Dict:
A = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
A = self.get_auto_remove_tmp_dir()
A = F'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split()
A = F'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A_ )}\n '.split()
A = '\n --do_predict\n '.split()
A = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'--optim {optim}'.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
A = get_gpu_count()
A = get_torch_dist_unique_port()
A = F'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split()
A = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A_ ,env=self.get_env() )
else:
A = ['run_translation.py'] + args
with patch.object(A_ ,'argv' ,A_ ):
main()
return output_dir | 74 | 0 |
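# The memory arithmetic behind the bitsandbytes assertion above, as a sketch:
# standard Adam keeps ~8 bytes of optimizer state per parameter, 8-bit Adam
# roughly 2, and per the comment in the test only ~25M of the model's 54M
# parameters are quantizable (the embeddings stay in fp32).
quantizable_params = 25_000_000
adamw_state_mb = quantizable_params * 8 / 2**20
bnb_state_mb = quantizable_params * 2 / 2**20
print(round(adamw_state_mb - bnb_state_mb))  # ~143 MB, hence the 120 MB margin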
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: str ) -> Optional[int]:
'''simple docstring'''
A__ = RobertaPreLayerNormConfig.from_pretrained(
snake_case__ , architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A__ = torch.load(hf_hub_download(repo_id=snake_case__ , filename="pytorch_model.bin" ) )
A__ = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("roberta." ):
A__ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A__ = tensor_value
A__ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ )
model.save_pretrained(snake_case__ )
# convert tokenizer
A__ = AutoTokenizer.from_pretrained(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase__ = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 68 |
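# The key-renaming pattern of the conversion script above, self-contained:
# re-prefix state-dict keys and drop tensors the target model does not use.
import torch

original = {
    "roberta.encoder.weight": torch.zeros(2),
    "roberta.attn.self.LayerNorm.weight": torch.zeros(2),  # unused, dropped
}
converted = {}
for key, value in original.items():
    if key.endswith(".self.LayerNorm.weight") or key.endswith(".self.LayerNorm.bias"):
        continue
    if key.startswith("roberta."):
        key = "roberta_prelayernorm." + key[len("roberta."):]
    converted[key] = value
print(sorted(converted))  # ['roberta_prelayernorm.encoder.weight']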
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''deit'''
def __init__( self : int ,A_ : Optional[Any]=768 ,A_ : Union[str, Any]=12 ,A_ : Dict=12 ,A_ : int=3072 ,A_ : Optional[Any]="gelu" ,A_ : Dict=0.0 ,A_ : Any=0.0 ,A_ : str=0.02 ,A_ : Tuple=1e-12 ,A_ : Union[str, Any]=224 ,A_ : Optional[Any]=16 ,A_ : List[Any]=3 ,A_ : Optional[Any]=True ,A_ : Optional[int]=16 ,**A_ : Union[str, Any] ,) -> Dict:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = encoder_stride
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: int = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> float:
return 1e-4 | 74 | 0 |
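# What the 1e-4 ONNX validation tolerance above means in practice: exported
# outputs are accepted when they match the reference within that atol.
import numpy as np

reference = np.array([0.12345, -0.98765])
exported = reference + 5e-5                          # drift within tolerance
print(np.allclose(reference, exported, atol=1e-4))   # True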
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = """▁"""
lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
lowerCAmelCase = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
lowerCAmelCase = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
lowerCAmelCase = {
"""ernie-m-base""": 5_14,
"""ernie-m-large""": 5_14,
}
lowerCAmelCase = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class A_ ( _lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["input_ids"]
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = RESOURCE_FILES_NAMES
def __init__( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :List[str]=False , lowerCamelCase_ :Union[str, Any]="utf8" , lowerCamelCase_ :List[Any]="[UNK]" , lowerCamelCase_ :Optional[int]="[SEP]" , lowerCamelCase_ :str="[PAD]" , lowerCamelCase_ :int="[CLS]" , lowerCamelCase_ :str="[MASK]" , lowerCamelCase_ :Optional[Dict[str, Any]] = None , **lowerCamelCase_ :str , ):
"""simple docstring"""
lowerCamelCase__ : Dict ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , vocab_file=A_ , encoding=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
lowerCamelCase__ : Any =do_lower_case
lowerCamelCase__ : str =sentencepiece_model_ckpt
lowerCamelCase__ : Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCamelCase__ : Optional[int] =self.load_vocab(filepath=A_ )
else:
lowerCamelCase__ : List[Any] ={self.sp_model.id_to_piece(A_ ): id for id in range(self.sp_model.get_piece_size() )}
lowerCamelCase__ : Optional[Any] ={v: k for k, v in self.vocab.items()}
def UpperCAmelCase__ ( self :Optional[int] , lowerCamelCase_ :List[str] ):
"""simple docstring"""
if text is None:
return None
lowerCamelCase__ : str =self.tokenize(A_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] ='', []
for i, ch in enumerate(A_ ):
if ch in self.SP_CHAR_MAPPING:
lowerCamelCase__ : int =self.SP_CHAR_MAPPING.get(A_ )
else:
lowerCamelCase__ : Any =unicodedata.normalize('NFKC' , A_ )
if self.is_whitespace(A_ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(A_ ) )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str =normalized_text, [], 0
if self.do_lower_case:
lowerCamelCase__ : Optional[int] =text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCamelCase__ : List[str] =token[1:]
lowerCamelCase__ : List[str] =text[offset:].index(A_ ) + offset
lowerCamelCase__ : Dict =start + len(A_ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCamelCase__ : Optional[Any] =end
return token_mapping
@property
def UpperCAmelCase__ ( self :List[Any] ):
"""simple docstring"""
return len(self.vocab )
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self :Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : Any =self.__dict__.copy()
lowerCamelCase__ : Dict =None
return state
def __setstate__( self :Any , lowerCamelCase_ :Any ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCamelCase__ : Tuple ={}
lowerCamelCase__ : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase__ ( self :str , lowerCamelCase_ :Union[str, Any] ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(A_ , A_ ) for c in text) )
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Any=64 , lowerCamelCase_ :int=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowerCamelCase__ : Union[str, Any] =True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowerCamelCase__ : int =self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowerCamelCase__ : str =self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowerCamelCase__ : Optional[int] =self.sp_model.EncodeAsPieces(A_ )
else:
lowerCamelCase__ : List[str] =self.sp_model.SampleEncodeAsPieces(A_ , A_ , A_ )
lowerCamelCase__ : Optional[int] =[]
for pi, piece in enumerate(A_ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(A_ ) and pi != 0:
new_pieces.append(A_ )
continue
else:
continue
lowerCamelCase__ : Any =0
for i, chunk in enumerate(A_ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(A_ ) or self.is_punct(A_ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(A_ )
lowerCamelCase__ : Optional[int] =i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCamelCase__ : Union[str, Any] =i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCamelCase__ : Dict =i
if len(A_ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase__ ( self :int , lowerCamelCase_ :Dict ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =''.join(A_ ).replace(A_ , ' ' ).strip()
return out_string
def UpperCAmelCase__ ( self :Any , lowerCamelCase_ :Tuple ):
"""simple docstring"""
lowerCamelCase__ : int =self.convert_ids_to_tokens(A_ )
lowerCamelCase__ : Union[str, Any] =''.join(A_ ).replace(A_ , ' ' ).strip()
return out_string
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :str ):
"""simple docstring"""
return self.vocab.get(A_ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :int ):
"""simple docstring"""
return self.reverse_vocab.get(A_ , self.unk_token )
def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :int=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : int =[self.cls_token_id]
lowerCamelCase__ : List[str] =[self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase__ ( self :Tuple , lowerCamelCase_ :str , lowerCamelCase_ :Dict=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase__ ( self :Tuple , lowerCamelCase_ :str , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :Tuple=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1]
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :List[int] , lowerCamelCase_ :Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(A_ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(A_ ) + 1) + [1] * (len(A_ ) + 3)
def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :Optional[int] ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :int ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase__ ( self :Any , lowerCamelCase_ :List[Any] ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :str ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(A_ ) == 1:
lowerCamelCase__ : Optional[int] =unicodedata.category(A_ )
if cat == "Zs":
return True
return False
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :Tuple ):
"""simple docstring"""
lowerCamelCase__ : Tuple ={}
with io.open(A_ , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(A_ ):
lowerCamelCase__ : str =line.rstrip('\n' )
lowerCamelCase__ : Optional[int] =int(A_ )
return token_to_idx
def UpperCAmelCase__ ( self :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ):
"""simple docstring"""
lowerCamelCase__ : Any =0
if os.path.isdir(A_ ):
lowerCamelCase__ : Union[str, Any] =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
lowerCamelCase__ : Union[str, Any] =(filename_prefix + '-' if filename_prefix else '') + save_directory
with open(A_ , 'w' , encoding='utf-8' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda lowerCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
lowerCamelCase__ : Tuple =token_index
writer.write(token + '\n' )
index += 1
lowerCamelCase__ : int =os.path.join(A_ , 'sentencepiece.bpe.model' )
with open(A_ , 'wb' ) as fi:
lowerCamelCase__ : List[Any] =self.sp_model.serialized_model_proto()
fi.write(A_ )
return (vocab_file,) | 126 |
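# The one-token-per-line vocab round-trip used by the tokenizer above, as a
# small standalone sketch (the file name is arbitrary): the line number is
# the token id.
def load_vocab(path: str) -> dict[str, int]:
    with open(path, encoding="utf-8") as f:
        return {line.rstrip("\n"): idx for idx, line in enumerate(f)}

def save_vocab(vocab: dict[str, int], path: str) -> None:
    with open(path, "w", encoding="utf-8") as f:
        for token, _ in sorted(vocab.items(), key=lambda kv: kv[1]):
            f.write(token + "\n")

save_vocab({"hello": 0, "world": 1}, "vocab.txt")
print(load_vocab("vocab.txt"))  # {'hello': 0, 'world': 1}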
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[int]=0.999 , snake_case__ : Union[str, Any]="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ : Dict ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
A = []
for i in range(snake_case__ ):
A = i / num_diffusion_timesteps
A = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) )
return torch.tensor(snake_case__ , dtype=torch.floataa )
class lowerCAmelCase_ ( _lowercase , _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
_lowerCamelCase: Optional[Any] = 2
@register_to_config
def __init__( self : str ,A_ : int = 1000 ,A_ : float = 0.0_00_85 ,A_ : float = 0.0_12 ,A_ : str = "linear" ,A_ : Optional[Union[np.ndarray, List[float]]] = None ,A_ : str = "epsilon" ,A_ : Optional[bool] = False ,A_ : Optional[bool] = False ,A_ : float = 1.0 ,A_ : str = "linspace" ,A_ : int = 0 ,) -> List[str]:
if trained_betas is not None:
A = torch.tensor(A_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
A = torch.linspace(A_ ,A_ ,A_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,A_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A = betas_for_alpha_bar(A_ ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
A = betas_for_alpha_bar(A_ ,alpha_transform_type='exp' )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
A = 1.0 - self.betas
A = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(A_ ,A_ ,A_ )
A = use_karras_sigmas
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : Tuple=None ) -> Tuple:
if schedule_timesteps is None:
A = self.timesteps
A = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A = 1 if len(A_ ) > 1 else 0
else:
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
A = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : torch.FloatTensor ,A_ : Union[float, torch.FloatTensor] ,) -> torch.FloatTensor:
A = self.index_for_timestep(A_ )
A = self.sigmas[step_index]
A = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : Union[str, torch.device] = None ,A_ : Optional[int] = None ,) -> Optional[Any]:
A = num_inference_steps
A = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A = np.linspace(0 ,num_train_timesteps - 1 ,A_ ,dtype=A_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(0 ,A_ ) * step_ratio).round()[::-1].copy().astype(A_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(A_ ,0 ,-step_ratio )).round().copy().astype(A_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
A = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A = np.log(A_ )
A = np.interp(A_ ,np.arange(0 ,len(A_ ) ) ,A_ )
if self.config.use_karras_sigmas:
A = self._convert_to_karras(in_sigmas=A_ ,num_inference_steps=self.num_inference_steps )
A = np.array([self._sigma_to_t(A_ ,A_ ) for sigma in sigmas] )
A = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
A = torch.from_numpy(A_ ).to(device=A_ )
A = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A = torch.from_numpy(A_ )
A = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(A_ ).startswith('mps' ):
# mps does not support float64
A = timesteps.to(A_ ,dtype=torch.floataa )
else:
A = timesteps.to(device=A_ )
# empty dt and derivative
A = None
A = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A = defaultdict(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : List[str] ) -> Dict:
# get log sigma
A = np.log(A_ )
# get distribution
A = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A = low_idx + 1
A = log_sigmas[low_idx]
A = log_sigmas[high_idx]
# interpolate sigmas
A = (low - log_sigma) / (low - high)
A = np.clip(A_ ,0 ,1 )
# transform interpolation to time range
A = (1 - w) * low_idx + w * high_idx
A = t.reshape(sigma.shape )
return t
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : torch.FloatTensor ,A_ : int ) -> torch.FloatTensor:
A = in_sigmas[-1].item()
A = in_sigmas[0].item()
A = 7.0 # 7.0 is the value used in the paper
A = np.linspace(0 ,1 ,A_ )
A = sigma_min ** (1 / rho)
A = sigma_max ** (1 / rho)
A = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return self.dt is None
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : Union[float, torch.FloatTensor] ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : bool = True ,) -> Union[SchedulerOutput, Tuple]:
A = self.index_for_timestep(A_ )
# advance index counter by 1
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A = self.sigmas[step_index]
A = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A = self.sigmas[step_index - 1]
A = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A = 0
A = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A = sigma_hat if self.state_in_first_order else sigma_next
A = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A = sigma_hat if self.state_in_first_order else sigma_next
A = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A = model_output
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
A = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A = sigma_next - sigma_hat
# store for 2nd order step
A = derivative
A = dt
A = sample
else:
# 2. 2nd order / Heun's method
A = (sample - pred_original_sample) / sigma_next
A = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A = self.dt
A = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A = None
A = None
A = None
A = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A_ )
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 74 | 0 |
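A small numerical sketch of the Karras schedule constructed by _convert_to_karras above (illustrative sigma range only; real pipelines derive sigma_min and sigma_max from the trained betas):

import numpy as np

sigma_min, sigma_max, rho, num_steps = 0.1, 10.0, 7.0, 5  # toy values

ramp = np.linspace(0, 1, num_steps)
min_inv_rho = sigma_min ** (1 / rho)
max_inv_rho = sigma_max ** (1 / rho)
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
print(sigmas)  # descends from 10.0 to 0.1, with steps clustered near sigma_min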
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
a__ ={'''input_ids''': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a__,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 188 |
"""simple docstring"""
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ,A_ : list[int] ) -> None:
A = len(A_ )
A = [0] * len_array
if len_array > 0:
A = array[0]
for i in range(1 ,A_ ):
A = self.prefix_sum[i - 1] + array[i]
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ) -> bool:
A = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(A_ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 74 | 0 |
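A short usage sketch for the PrefixSum class above (illustrative values):

prefix_sum = PrefixSum([1, 2, 3, 4])
print(prefix_sum.get_sum(1, 3))      # 2 + 3 + 4 = 9
print(prefix_sum.contains_sum(7))    # True: the subarray [3, 4] sums to 7
print(prefix_sum.contains_sum(100))  # False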
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 267 |
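The init above defers heavy imports through transformers' internal _LazyModule. A minimal, self-contained sketch of the same idea (hypothetical helper, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported attributes to their submodules only on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value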
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = RobertaPreLayerNormConfig.from_pretrained(
snake_case__ , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
A = torch.load(hf_hub_download(repo_id=snake_case__ , filename='pytorch_model.bin' ) )
A = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith('roberta.' ):
A = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
A = tensor_value
A = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ )
model.save_pretrained(snake_case__ )
# convert tokenizer
A = AutoTokenizer.from_pretrained(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 74 | 0 |
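Illustrative invocation of the converter above, called directly from Python (the repo id comes from the script's own help text; the output directory is a hypothetical example):

convert_roberta_prelayernorm_checkpoint_to_pytorch(
    checkpoint_repo="andreasmadsen/efficient_mlm_m0.40",
    pytorch_dump_folder_path="./roberta-prelayernorm-converted",
)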
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Construct a MGP-STR character-level tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
| 334 |
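A hedged round-trip sketch for the character tokenizer above, using a throwaway toy vocabulary (not the released MGP-STR vocab):

import json
import tempfile

toy_vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
    json.dump(toy_vocab, tmp)

tokenizer = MgpstrTokenizer(vocab_file=tmp.name)
tokens = tokenizer._tokenize("abc")  # ['a', 'b', 'c'] -- one token per character
ids = [tokenizer._convert_token_to_id(t) for t in tokens]
print(ids)  # [2, 3, 4]; unknown characters fall back to the "[GO]" id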
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''roformer'''
def __init__( self : Tuple ,A_ : Optional[int]=5_0000 ,A_ : Tuple=None ,A_ : Optional[Any]=768 ,A_ : Dict=12 ,A_ : Optional[int]=12 ,A_ : Union[str, Any]=3072 ,A_ : Dict="gelu" ,A_ : Dict=0.1 ,A_ : List[Any]=0.1 ,A_ : List[Any]=1536 ,A_ : List[str]=2 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : Optional[int]=0 ,A_ : List[str]=False ,A_ : Tuple=True ,**A_ : List[str] ,) -> Dict:
super().__init__(pad_token_id=A_ ,**A_ )
A = vocab_size
A = hidden_size if embedding_size is None else embedding_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = rotary_value
A = use_cache
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 74 | 0 |
"""simple docstring"""
import os
def UpperCAmelCase ( UpperCamelCase__ = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(snake_case__ ) , snake_case__ ) ) as input_file:
A__ = [
[int(snake_case__ ) for element in line.split(',' )]
for line in input_file.readlines()
]
A__ = len(snake_case__ )
A__ = len(matrix[0] )
A__ = [[-1 for _ in range(snake_case__ )] for _ in range(snake_case__ )]
for i in range(snake_case__ ):
A__ = matrix[i][0]
for j in range(1 , snake_case__ ):
for i in range(snake_case__ ):
A__ = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , snake_case__ ):
A__ = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
A__ = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 221 |
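A small worked example of the right/up/down recurrence above, as an inline variant that takes the matrix directly instead of reading a file (hypothetical helper for illustration):

def min_path_sum(matrix: list[list[int]]) -> int:
    rows = len(matrix)
    best = [row[0] for row in matrix]  # seed with column 0
    for j in range(1, len(matrix[0])):
        best = [best[i] + matrix[i][j] for i in range(rows)]   # move right
        for i in range(1, rows):                               # relax downward
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):                      # relax upward
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)

print(min_path_sum([[131, 673, 234], [201, 96, 342], [630, 803, 746]]))  # 639: 201 -> 96 -> 342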
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _snake_case ( snake_case__ : Dict ):
A = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def _snake_case ( snake_case__ : int ):
A , A = emb.weight.shape
A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
A = emb.weight.data
return lin_layer
def _snake_case ( snake_case__ : List[str] , snake_case__ : Any="facebook/mbart-large-en-ro" , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False ):
A = torch.load(snake_case__ , map_location='cpu' )['model']
remove_ignore_keys_(snake_case__ )
A = state_dict['encoder.embed_tokens.weight'].shape[0]
A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ )
if mbart_aa and finetuned:
A = 'relu'
A = state_dict['decoder.embed_tokens.weight']
A = MBartForConditionalGeneration(snake_case__ )
model.model.load_state_dict(snake_case__ )
if finetuned:
A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
_lowercase = parser.parse_args()
_lowercase = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path) | 74 | 0 |
"""simple docstring"""
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE : int = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case__ )
def lowercase ( _snake_case : int ) ->Optional[Any]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
__snake_case : Any = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(snake_case__ , id=snake_case__ )
| 102 |
"""simple docstring"""
import argparse
import struct
import unittest
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Tuple ,A_ : bytes ) -> None:
A = data
# Initialize hash values
A = [
0X6_A_0_9_E_6_6_7,
0XB_B_6_7_A_E_8_5,
0X3_C_6_E_F_3_7_2,
0XA_5_4_F_F_5_3_A,
0X5_1_0_E_5_2_7_F,
0X9_B_0_5_6_8_8_C,
0X1_F_8_3_D_9_A_B,
0X5_B_E_0_C_D_1_9,
]
        # Initialize round constants
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
| 74 | 0 |
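A quick cross-check of the class above against the standard library:

import hashlib

for message in [b"", b"abc", b"The quick brown fox jumps over the lazy dog"]:
    assert SHA256(message).hash == hashlib.sha256(message).hexdigest()
print("SHA256 matches hashlib on all three vectors")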
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="" , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
__UpperCamelCase :List[Any] = 0
__UpperCamelCase :Tuple = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(snake_case__ ):
index += 1
__UpperCamelCase :List[str] = ['''\n'''.join(lines[:index] )]
else:
__UpperCamelCase :str = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__UpperCamelCase :Tuple = [lines[index]]
index += 1
while index < len(snake_case__ ) and (end_prompt is None or not lines[index].startswith(snake_case__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(snake_case__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(snake_case__ ) )
if index < len(snake_case__ ) - 1:
__UpperCamelCase :Dict = [lines[index + 1]]
index += 1
else:
__UpperCamelCase :List[Any] = []
else:
blocks.append('''\n'''.join(snake_case__ ) )
__UpperCamelCase :Union[str, Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(snake_case__ ) > 0:
blocks.append('''\n'''.join(snake_case__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(snake_case__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    "Wraps a `key` (that maps an object to a string) to lowercase and remove underscores."

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    "Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 43 |
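The ordering rule implemented by sort_objects above (constants first, then classes, then functions, each group sorted case-insensitively with underscores ignored) can be exercised directly:

names = ["load_model", "MODEL_MAPPING", "AutoModel", "CONFIG_NAME", "BertModel", "get_config"]
print(sort_objects(names))
# ['CONFIG_NAME', 'MODEL_MAPPING', 'AutoModel', 'BertModel', 'get_config', 'load_model']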
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 74 | 0 |
from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter


description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision: str = "no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """
    Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also
    set CPU if it is a CPU-only machine.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )

    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"

    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 146 |
"""simple docstring"""
from __future__ import annotations
import requests
def _snake_case ( snake_case__ : str ):
A = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(snake_case__ ).json()
def _snake_case ( snake_case__ : int = 10 ):
A = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
A = requests.get(snake_case__ ).json()[:max_stories]
return [get_hackernews_story(snake_case__ ) for story_id in story_ids]
def _snake_case ( snake_case__ : int = 10 ):
A = hackernews_top_stories(snake_case__ )
return "\n".join('* [{title}]({url})'.format(**snake_case__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown()) | 74 | 0 |
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Returns the count of all n-digit positive integers which are also an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
| 346 |
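A concrete instance of the counting condition above: 9**5 = 59049 is a 5-digit fifth power, while 10**5 = 100000 already has 6 digits, which is why bases above 9 can never qualify:

for base, power in [(9, 5), (10, 5)]:
    value = base**power
    print(base, power, value, len(str(value)) == power)
# 9 5 59049 True
# 10 5 100000 False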
"""simple docstring"""
from string import ascii_uppercase
_lowercase = {char: i for i, char in enumerate(ascii_uppercase)}
_lowercase = dict(enumerate(ascii_uppercase))
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = len(snake_case__ )
A = 0
while True:
if x == i:
A = 0
if len(snake_case__ ) == len(snake_case__ ):
break
key += key[i]
i += 1
return key
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = ''
A = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
A = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = ''
A = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
A = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def _snake_case ( ):
A = 'THE GERMAN ATTACK'
A = 'SECRET'
A = generate_key(snake_case__ , snake_case__ )
A = cipher_text(snake_case__ , snake_case__ )
print(F'Encrypted Text = {s}' )
print(F'Original Text = {original_text(snake_case__ , snake_case__ )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 74 | 0 |
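A round-trip sketch for the cipher above with a different message/key pair (the ciphertext below was verified by hand against the subtraction rule):

key = generate_key("ATTACK AT DAWN", "LEMON")
encrypted = cipher_text("ATTACK AT DAWN", key)
print(encrypted)  # PPHMPZ WH PNLJ
assert original_text(encrypted, key) == "ATTACK AT DAWN"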
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowercase_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class snake_case ( _lowercase ):
'''simple docstring'''
A_ : bool = field(default=_lowercase , metadata={"help": "Whether to use SortishSampler or not."} )
A_ : bool = field(
default=_lowercase , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
A_ : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
A_ : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
A_ : Optional[Union[str, Path, GenerationConfig]] = field(
default=_lowercase , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
__A = super().to_dict()
for k, v in d.items():
if isinstance(A_, A_ ):
__A = v.to_dict()
return d
| 266 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ) -> Optional[Any]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
A = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(A_ )
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 74 | 0 |
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
| 68 |
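A worked instance of the truncation test above: 3797 stays prime under every left and right truncation.

print(list_truncated_nums(3797))                            # [3797, 797, 379, 97, 37, 7, 3]
print(all(is_prime(i) for i in list_truncated_nums(3797)))  # True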
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_lowercase = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: str
_lowerCamelCase: Optional[str] = None
_lowerCamelCase: Optional[Union[str, int]] = None
_lowerCamelCase: Optional[Union[str, int]] = None
_lowerCamelCase: Optional[Union[str, int]] = None
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
A , A , A = _str_to_version_tuple(self.version_str )
def __repr__( self : Optional[int] ) -> Dict:
return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return self.major, self.minor, self.patch
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple ) -> Union[str, Any]:
if isinstance(A_ ,A_ ):
return Version(A_ )
elif isinstance(A_ ,A_ ):
return other
raise TypeError(F'{other} (type {type(A_ )}) cannot be compared to version.' )
def __eq__( self : List[Any] ,A_ : Dict ) -> Any:
try:
A = self._validate_operand(A_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : List[Any] ,A_ : Optional[int] ) -> Tuple:
A = self._validate_operand(A_ )
return self.tuple < other.tuple
def __hash__( self : Union[str, Any] ) -> Union[str, Any]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,A_ : List[str] ) -> List[str]:
A = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.version_str
def _snake_case ( snake_case__ : List[str] ):
A = _VERSION_REG.match(snake_case__ )
if not res:
raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
return tuple(int(snake_case__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def _snake_case ( snake_case__ : str ):
return ".".join(str(snake_case__ ) for v in version_tuple ) | 74 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        elif name.split('.')[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        words = [line.split(' ')[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
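# For illustration only (assumed input, not from the original file): a fairseq
# dict.txt whose first two lines are "hello 42" and "world 17" would yield
# {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'hello': 4, 'world': 5}.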
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers, ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True, )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove('embed_out')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, 'vocab.json'), 'w') as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, 'vocab.json'))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_02_24, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
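# Example invocation (illustrative only -- the checkpoint and dict paths below
# are placeholders, not files shipped with this script):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint_best.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./converted_model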
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('''DataClass''', Any)
DataClassType = NewType('''DataClassType''', Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).')


def make_choice_type_function(choices: list):
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(*,
    aliases: Union[str, List[str]] = None, help: str = None, default: Any = dataclasses.MISSING, default_factory: Callable[[], Any] = dataclasses.MISSING, metadata: dict = None, **kwargs: Any, ):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['aliases'] = aliases
    if help is not None:
        metadata['help'] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    '''simple docstring'''

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs['formatter_class'] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = F'--{field.name}'
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default')
        aliases = kwargs.pop('aliases', [])
        if isinstance(aliases, str):
            aliases = [aliases]
        origin_type = getattr(field.type, '__origin__', field.type)
        if origin_type is Union or (hasattr(types, 'UnionType') and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    F' Problem encountered in field \'{field.name}\'.')
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, '__origin__', field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, '__origin__', field.type)
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs['choices'] = field.type.__args__
            else:
                kwargs['choices'] = [x.value for x in field.type]
            kwargs['type'] = make_choice_type_function(kwargs['choices'])
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            else:
                kwargs['required'] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['type'] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['default'] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['nargs'] = '?'
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['const'] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs['type'] = field.type.__args__[0]
            kwargs['nargs'] = '+'
            if field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['required'] = True
        else:
            kwargs['type'] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            else:
                kwargs['required'] = True
        parser.add_argument(field_name, *aliases, **kwargs)
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['default'] = False
            parser.add_argument(F'--no_{field.name}', action='store_false', dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, '_argument_group_name'):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
                'removing line of `from __future__ import annotations` which opts in Postponed '
                'Evaluation of Annotations (PEP 563)')
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = '.'.join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions that lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.') from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None, ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix('.args'))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action='append')
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip('-'), None)
                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}')
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}')
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding='utf-8') as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
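# A minimal usage sketch (not part of the original file): parsing CLI flags
# straight into a dataclass. The `TrainingArgs` dataclass here is invented for
# illustration.
#
#     from dataclasses import dataclass
#
#     @dataclass
#     class TrainingArgs:
#         learning_rate: float = HfArg(help="Peak learning rate", default=3e-4)
#         fp16: bool = False
#
#     parser = HfArgumentParser(TrainingArgs)
#     (training_args,) = parser.parse_args_into_dataclasses(
#         args=["--learning_rate", "1e-4", "--fp16", "true"]
#     )
#     assert training_args.learning_rate == 1e-4 and training_args.fp16 is True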
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
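# Usage is unchanged from YolosImageProcessor; instantiating the deprecated
# alias simply emits a FutureWarning first, e.g.:
#
#     feature_extractor = YolosFeatureExtractor()  # warns, then behaves like YolosImageProcessor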
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load('glue', 'mrpc')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:', eval_metric)
        performance_metric[F'epoch-{epoch}'] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--performance_lower_bound', type=float, default=None, help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.', )
    parser.add_argument(
        '--num_epochs', type=int, default=3, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
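# Typically launched through accelerate (illustrative command; the config file
# path is a placeholder):
#
#   accelerate launch --config_file ./accelerate_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./results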
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'ResNetConfig'

# Base docstring
_CHECKPOINT_FOR_DOC = 'microsoft/resnet-50'
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'microsoft/resnet-50'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tiger cat'

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'microsoft/resnet-50',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    """simple docstring"""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        """simple docstring"""
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        """simple docstring"""
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """simple docstring"""

    def __init__(self, config: ResNetConfig):
        """simple docstring"""
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act)
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        """simple docstring"""
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""")
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """simple docstring"""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        """simple docstring"""
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        """simple docstring"""
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """simple docstring"""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        """simple docstring"""
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride), ResNetConvLayer(out_channels, out_channels, activation=None), )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        """simple docstring"""
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """simple docstring"""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        """simple docstring"""
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1), ResNetConvLayer(reduces_channels, reduces_channels, stride=stride), ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        """simple docstring"""
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """simple docstring"""

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, ):
        """simple docstring"""
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act), *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)], )

    def forward(self, input: Tensor) -> Tensor:
        """simple docstring"""
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    """simple docstring"""

    def __init__(self, config: ResNetConfig):
        """simple docstring"""
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        """simple docstring"""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state, hidden_states=hidden_states, )
class ResNetPreTrainedModel(PreTrainedModel):
    """simple docstring"""

    config_class = ResNetConfig
    base_model_prefix = '''resnet'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """simple docstring"""
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="""fan_out""", nonlinearity="""relu""")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        """simple docstring"""
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = R'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
RESNET_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.", RESNET_START_DOCSTRING, )
class ResNetModel(ResNetPreTrainedModel):
    """simple docstring"""

    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="""vision""", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
@add_start_docstrings(
    "\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ", RESNET_START_DOCSTRING, )
class ResNetForImageClassification(ResNetPreTrainedModel):
    """simple docstring"""

    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> ImageClassifierOutputWithNoAttention:
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = """single_label_classification"""
                else:
                    self.config.problem_type = """multi_label_classification"""
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    "\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ", RESNET_START_DOCSTRING, )
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    """simple docstring"""

    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BackboneOutput:
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, )
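# A short usage sketch (standard transformers API; the input is a placeholder
# tensor rather than a real image):
#
#     import torch
#     from transformers import ResNetForImageClassification
#
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     pixel_values = torch.randn(1, 3, 224, 224)
#     with torch.no_grad():
#         logits = model(pixel_values).logits
#     print(model.config.id2label[logits.argmax(-1).item()])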
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    '''simple docstring'''

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            '''feature-extraction''': XLMModel,
            '''fill-mask''': XLMWithLMHeadModel,
            '''question-answering''': XLMForQuestionAnsweringSimple,
            '''text-classification''': XLMForSequenceClassification,
            '''text-generation''': XLMWithLMHeadModel,
            '''token-classification''': XLMForTokenClassification,
            '''zero-shot''': XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions))

    def _check_hidden_states_for_generate(self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
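# To run just these tests (illustrative command; the file path depends on your
# checkout layout):
#
#   python -m pytest tests/models/xlm/test_modeling_xlm.py -k "XLMModelTest" -v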
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a_ ( _lowercase ):
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
def __init__( self : List[Any] ,snake_case : UNetaDModel ,snake_case : KarrasVeScheduler ):
super().__init__()
self.register_modules(unet=A_ ,scheduler=A_ )
@torch.no_grad()
def __call__( self : Optional[Any] ,snake_case : int = 1 ,snake_case : int = 50 ,snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,snake_case : Optional[str] = "pil" ,snake_case : bool = True ,**snake_case : Optional[Any] ,):
SCREAMING_SNAKE_CASE =self.unet.config.sample_size
SCREAMING_SNAKE_CASE =(batch_size, 3, img_size, img_size)
SCREAMING_SNAKE_CASE =self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
SCREAMING_SNAKE_CASE =randn_tensor(A_ ,generator=A_ ,device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
SCREAMING_SNAKE_CASE =self.scheduler.schedule[t]
SCREAMING_SNAKE_CASE =self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.scheduler.add_noise_to_input(A_ ,A_ ,generator=A_ )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
SCREAMING_SNAKE_CASE =(sigma_hat / 2) * model((sample_hat + 1) / 2 ,sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
SCREAMING_SNAKE_CASE =self.scheduler.step(A_ ,A_ ,A_ ,A_ )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
SCREAMING_SNAKE_CASE =(sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 ,sigma_prev / 2 ).sample
SCREAMING_SNAKE_CASE =self.scheduler.step_correct(
A_ ,A_ ,A_ ,A_ ,step_output.prev_sample ,step_output['derivative'] ,)
SCREAMING_SNAKE_CASE =step_output.prev_sample
SCREAMING_SNAKE_CASE =(sample / 2 + 0.5).clamp(0 ,1 )
SCREAMING_SNAKE_CASE =sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE =self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
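
# Editor's sketch (not part of the original file): driving the pipeline above
# end to end. Because the identifiers in this file are name-mangled, the
# example targets the equivalent upstream class, diffusers' KarrasVePipeline
# (an assumption worth checking against your diffusers version); the tiny
# randomly initialised UNet keeps it runnable without downloading a
# checkpoint -- the output is pure noise, the point is only the call signature.
if __name__ == "__main__":
    from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

    unet = UNet2DModel(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
    image = pipe(batch_size=1, num_inference_steps=2).images[0]
    print(image.size)  # (32, 32)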
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowercase = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self : int ,**A_ : Any ) -> Any:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A = deprecated_arg[3:]
A = not kwargs.pop(A_ )
logger.warning(
                F'{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'
F' {positive_arg}={kwargs[positive_arg]}' )
A = kwargs.pop('tpu_name' ,self.tpu_name )
A = kwargs.pop('device_idx' ,self.device_idx )
A = kwargs.pop('eager_mode' ,self.eager_mode )
A = kwargs.pop('use_xla' ,self.use_xla )
super().__init__(**A_ )
_lowerCamelCase: str = field(
default=_lowercase , metadata={'''help''': '''Name of TPU'''} , )
_lowerCamelCase: int = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
_lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Benchmark models in eager model.'''} )
_lowerCamelCase: bool = field(
default=_lowercase , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self ,['tf'] )
A = None
if self.tpu:
try:
if self.tpu_name:
A = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
A = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
A = None
return tpu
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self ,['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
A = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'GPU' )
A = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([] ,'GPU' ) # disable GPU
A = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}' )
return strategy
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool:
requires_backends(self ,['tf'] )
return self._setup_tpu is not None
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> "tf.distribute.Strategy":
requires_backends(self ,['tf'] )
return self._setup_strategy
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
requires_backends(self ,['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
requires_backends(self ,['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> bool:
        return self.n_gpu > 0
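
# Editor's sketch (commented out: the dataclass above is name-mangled and
# running it needs TensorFlow). Upstream this is transformers'
# TensorFlowBenchmarkArguments, and the deprecated-argument shim in __init__
# behaves like this:
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#   # warns: "no_cuda is deprecated. Please use --no-cuda or cuda=False"
#   print(args.cuda)   # False
#   print(args.n_gpu)  # 0 when no GPU is visible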
"""simple docstring"""
from __future__ import annotations
__lowerCamelCase = [True] * 1_00_00_01
__lowerCamelCase = 2
while i * i <= 1_00_00_00:
if seive[i]:
for j in range(i * i, 1_00_00_01, i):
__lowerCamelCase = False
i += 1
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
return seive[n]
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
return any(digit in '02468' for digit in str(snake_case__ ) )
def UpperCAmelCase ( UpperCamelCase__ = 1_000_000 ):
"""simple docstring"""
A__ = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(snake_case__ ) and not contains_an_even_digit(snake_case__ ):
A__ = str(snake_case__ )
A__ = [int(str_num[j:] + str_num[:j] ) for j in range(len(snake_case__ ) )]
if all(is_prime(snake_case__ ) for i in list_nums ):
result.append(snake_case__ )
return result
def UpperCAmelCase ( ):
"""simple docstring"""
return len(find_circular_primes() )
if __name__ == "__main__":
print(F'''{len(find_circular_primes()) = }''')
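
# Editor's sketch (self-contained): the digit-rotation step used in
# find_circular_primes above, shown on 197, the textbook circular prime
# (197 -> 971 -> 719, all prime).
if __name__ == "__main__":
    s = "197"
    print([int(s[j:] + s[:j]) for j in range(len(s))])  # [197, 971, 719]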
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''dpt'''
def __init__( self : str ,A_ : Tuple=768 ,A_ : int=12 ,A_ : Optional[int]=12 ,A_ : Optional[int]=3072 ,A_ : List[str]="gelu" ,A_ : str=0.0 ,A_ : int=0.0 ,A_ : str=0.02 ,A_ : str=1e-12 ,A_ : str=384 ,A_ : Dict=16 ,A_ : Union[str, Any]=3 ,A_ : Dict=False ,A_ : Any=True ,A_ : Optional[int]=[2, 5, 8, 11] ,A_ : Optional[Any]="project" ,A_ : Tuple=[4, 2, 1, 0.5] ,A_ : int=[96, 192, 384, 768] ,A_ : int=256 ,A_ : str=-1 ,A_ : Optional[int]=False ,A_ : Optional[int]=True ,A_ : Union[str, Any]=0.4 ,A_ : Union[str, Any]=255 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=[1, 1024, 24, 24] ,A_ : List[str]=[0, 1] ,A_ : List[Any]=None ,**A_ : Tuple ,) -> Union[str, Any]:
super().__init__(**A_ )
A = hidden_size
A = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
A = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
A = BitConfig(**A_ )
elif isinstance(A_ ,A_ ):
logger.info('Initializing the config with a `BiT` backbone.' )
A = BitConfig(**A_ )
elif isinstance(A_ ,A_ ):
A = backbone_config
else:
raise ValueError(
F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
A = backbone_featmap_shape
A = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
A = None
A = None
A = []
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
A = readout_type
A = reassemble_factors
A = neck_hidden_sizes
A = fusion_hidden_size
A = head_in_index
A = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
A = use_auxiliary_head
A = auxiliary_loss_weight
A = semantic_loss_ignore_index
A = semantic_classifier_dropout
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
A = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
A = self.backbone_config.to_dict()
A = self.__class__.model_type
        return output
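
# Editor's sketch: instantiating the hybrid configuration. The class above is
# name-mangled, so this targets the equivalent upstream class, transformers'
# DPTConfig (an assumption worth checking against your transformers version).
if __name__ == "__main__":
    from transformers import DPTConfig

    cfg = DPTConfig(is_hybrid=True)  # builds the default BiT backbone config
    print(cfg.backbone_config.layer_type)  # bottleneck
    print(cfg.to_dict()["model_type"])     # dpt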
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ = None ):
'''simple docstring'''
if components is None:
__snake_case : int = []
__snake_case : List[str] = list(A_ )
def __len__(self ):
'''simple docstring'''
return len(self.__components )
def __str__(self ):
'''simple docstring'''
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__(self , a_ ):
'''simple docstring'''
__snake_case : str = len(self )
if size == len(A_ ):
__snake_case : Any = [self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('''must have the same size''' )
def __sub__(self , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = len(self )
if size == len(A_ ):
__snake_case : List[Any] = [self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('''must have the same size''' )
@overload
def __mul__(self , a_ ):
'''simple docstring'''
...
@overload
def __mul__(self , a_ ):
'''simple docstring'''
...
def __mul__(self , a_ ):
'''simple docstring'''
if isinstance(A_ , (float, int) ):
__snake_case : Union[str, Any] = [c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__snake_case : List[Any] = len(self )
__snake_case : Union[str, Any] = [self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('''invalid operand!''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return Vector(self.__components )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('''index out of range''' )
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
assert -len(self.__components ) <= pos < len(self.__components )
__snake_case : Optional[int] = value
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
if len(self.__components ) == 0:
raise Exception('''Vector is empty''' )
__snake_case : Tuple = [c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = False ):
'''simple docstring'''
__snake_case : List[str] = self * other
__snake_case : Any = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def lowercase ( _snake_case : int ) ->Optional[Any]:
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
return Vector([0] * dimension )
def lowercase ( _snake_case : int , _snake_case : int ) ->int:
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ ) and (isinstance(snake_case__ , snake_case__ ))
__snake_case : Tuple = [0] * dimension
__snake_case : Dict = 1
return Vector(snake_case__ )
def lowercase ( _snake_case : float , _snake_case : Vector , _snake_case : Vector ) ->Optional[Any]:
"""simple docstring"""
assert (
isinstance(snake_case__ , snake_case__ )
and isinstance(snake_case__ , snake_case__ )
and (isinstance(snake_case__ , (int, float) ))
)
return x * scalar + y
def lowercase ( _snake_case : int , _snake_case : int , _snake_case : int ) ->List[str]:
"""simple docstring"""
random.seed(snake_case__ )
__snake_case : List[Any] = [random.randint(snake_case__ , snake_case__ ) for _ in range(snake_case__ )]
return Vector(snake_case__ )
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Dict = matrix
__snake_case : Dict = w
__snake_case : Tuple = h
def __str__(self ):
'''simple docstring'''
__snake_case : Union[str, Any] = ''''''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__(self , a_ ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
__snake_case : Tuple = []
for i in range(self.__height ):
__snake_case : int = [
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('''matrix must have the same dimension!''' )
def __sub__(self , a_ ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
__snake_case : int = []
for i in range(self.__height ):
__snake_case : str = [
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('''matrices must have the same dimension!''' )
@overload
def __mul__(self , a_ ):
'''simple docstring'''
...
@overload
def __mul__(self , a_ ):
'''simple docstring'''
...
def __mul__(self , a_ ):
'''simple docstring'''
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__snake_case : Any = zero_vector(self.__height )
for i in range(self.__height ):
__snake_case : str = [
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'''vector must have the same size as the '''
'''number of columns of the matrix!''' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__snake_case : Dict = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.__height
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.__width
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('''change_component: indices out of bounds''' )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
__snake_case : List[Any] = value
else:
raise Exception('''change_component: indices out of bounds''' )
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
__snake_case : Union[str, Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__snake_case : Tuple = minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('''Indices out of bounds''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if self.__height < 1:
raise Exception('''Matrix has no element''' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__snake_case : str = [
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
def lowercase ( _snake_case : int ) ->Union[str, Any]:
"""simple docstring"""
__snake_case : Any = [[0] * n for _ in range(snake_case__ )]
return Matrix(snake_case__ , snake_case__ , snake_case__ )
def lowercase ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int ) ->List[str]:
"""simple docstring"""
random.seed(snake_case__ )
__snake_case : Optional[Any] = [
[random.randint(snake_case__ , snake_case__ ) for _ in range(snake_case__ )] for _ in range(snake_case__ )
]
return Matrix(snake_case__ , snake_case__ , snake_case__ )
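
# Editor's sketch (commented out: the name-mangling above makes the second
# class definition shadow the first, so Vector/Matrix are not callable as
# written). With the intended upstream definitions from TheAlgorithms'
# linear_algebra module, the API reads:
#
#   v = Vector([1, 2, 3])
#   w = Vector([4, 5, 6])
#   print(v * w)                       # dot product -> 32
#   m = Matrix([[1, 0], [0, 1]], 2, 2)
#   print(m * Vector([3, 4]))          # identity times v -> (3,4)
#   print(m.determinant())             # -> 1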
"""simple docstring"""
from __future__ import annotations
import math
_lowercase = '''2020.9.26'''
_lowercase = '''xcodz-dot, cclaus, dhruvmanila'''
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float ):
if not all(isinstance(snake_case__ , (float, int) ) for val in locals().values() ):
A = F'Input values must either be float or int: {list(locals().values() )}'
raise TypeError(snake_case__ )
A = ((x * distance) / (z + distance)) * scale
A = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : str , snake_case__ : float ):
if not isinstance(snake_case__ , snake_case__ ):
raise TypeError('Axis must be a str' )
A = locals()
del input_variables["axis"]
if not all(isinstance(snake_case__ , (float, int) ) for val in input_variables.values() ):
A = (
'Input values except axis must either be float or int: '
F'{list(input_variables.values() )}'
)
raise TypeError(snake_case__ )
A = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
A = x * math.cos(snake_case__ ) - y * math.sin(snake_case__ )
A = y * math.cos(snake_case__ ) + x * math.sin(snake_case__ )
A = z
elif axis == "x":
A = y * math.cos(snake_case__ ) - z * math.sin(snake_case__ )
A = z * math.cos(snake_case__ ) + y * math.sin(snake_case__ )
A = x
elif axis == "y":
A = x * math.cos(snake_case__ ) - z * math.sin(snake_case__ )
A = z * math.cos(snake_case__ ) + x * math.sin(snake_case__ )
A = y
else:
raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""") | 74 | 0 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__lowercase = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = _TestCommandArgs(dataset=snake_case__ , all_configs=snake_case__ , save_infos=snake_case__ )
__UpperCamelCase :List[str] = TestCommand(*snake_case__ )
test_command.run()
__UpperCamelCase :Any = os.path.join(snake_case__ , '''README.md''' )
assert os.path.exists(snake_case__ )
__UpperCamelCase :Any = DatasetInfosDict.from_directory(snake_case__ )
__UpperCamelCase :Any = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
__UpperCamelCase , __UpperCamelCase :Optional[Any] = getattr(dataset_infos['''default'''] , snake_case__ ), getattr(expected_dataset_infos['''default'''] , snake_case__ )
if key == "num_bytes":
assert is_apercent_close(snake_case__ , snake_case__ )
elif key == "splits":
assert list(snake_case__ ) == list(snake_case__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
"""simple docstring"""
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : int ,A_ : int ) -> Union[str, Any]:
A = n
A = [None] * self.n
A = 0 # index of the first element
A = 0
A = 0
def __len__( self : int ) -> int:
return self.size
def _SCREAMING_SNAKE_CASE ( self : Any ) -> bool:
return self.size == 0
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
return False if self.is_empty() else self.array[self.front]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ) -> int:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
A = data
A = (self.rear + 1) % self.n
self.size += 1
return self
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
if self.size == 0:
raise Exception('UNDERFLOW' )
A = self.array[self.front]
A = None
A = (self.front + 1) % self.n
self.size -= 1
        return temp
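
# Editor's sketch (commented out: the queue's method names above are all
# mangled to one identifier, so only the last definition survives). With the
# intended enqueue/dequeue/first names of the upstream circular queue:
#
#   q = CircularQueue(3)
#   q.enqueue(10).enqueue(20)   # enqueue returns self, so calls chain
#   print(len(q), q.first())    # 2 10
#   print(q.dequeue())          # 10
#   print(q.dequeue())          # 20
#   q.dequeue()                 # raises Exception("UNDERFLOW")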
import string
from math import logaa
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : Optional[int] = document.translate(
str.maketrans('''''' , '''''' , string.punctuation ) ).replace('''\n''' , '''''' )
UpperCamelCase__ : Union[str, Any] = document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : Dict = corpus.lower().translate(
str.maketrans('''''' , '''''' , string.punctuation ) ) # strip all punctuation and replace it with ''
UpperCamelCase__ : Optional[Any] = corpus_without_punctuation.split('''\n''' )
UpperCamelCase__ : List[Any] = term.lower()
return (len([doc for doc in docs if term in doc] ), len(snake_case__ ))
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any=False ):
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) , 3 )
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
return round(tf * idf , 3 )
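
# Editor's sketch (commented out: all four helpers above are mangled to the
# same name `_a`, so only the last definition survives). With the intended
# upstream names (term_frequency, document_frequency,
# inverse_document_frequency, tf_idf):
#
#   corpus = "the cat sat\nthe dog sat\nthe cat ran"
#   tf = term_frequency("cat", "the cat sat")   # -> 1
#   df, n = document_frequency("cat", corpus)   # -> (2, 3)
#   idf = inverse_document_frequency(df, n)     # -> round(log10(3/2), 3) = 0.176
#   print(tf_idf(tf, idf))                      # -> 0.176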
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*A_ : List[str] ,**A_ : int ) -> None:
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' ,A_ ,)
        super().__init__(*A_ ,**A_ )
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = inspect.getfile(accelerate.test_utils )
UpperCAmelCase__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCAmelCase__ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] )
UpperCAmelCase__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
print(f'''Found {torch.cuda.device_count()} devices.''' )
UpperCAmelCase__ = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A_ , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
print(f'''Found {torch.cuda.device_count()} devices.''' )
UpperCAmelCase__ = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A_ , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A_ , env=os.environ.copy() )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
UpperCAmelCase__ = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ):
execute_subprocess_async(A_ , env=os.environ.copy() )
if __name__ == "__main__":
UpperCAmelCase_ = Accelerator()
UpperCAmelCase_ = (accelerator.state.process_index + 2, 1_0)
UpperCAmelCase_ = torch.randint(0, 1_0, shape).to(accelerator.device)
UpperCAmelCase_ = ''
UpperCAmelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCAmelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCAmelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''gpt_bigcode'''
_lowerCamelCase: List[Any] = ['''past_key_values''']
_lowerCamelCase: int = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[int] ,A_ : Dict=5_0257 ,A_ : Union[str, Any]=1024 ,A_ : str=768 ,A_ : Any=12 ,A_ : Any=12 ,A_ : Optional[int]=None ,A_ : Any="gelu_pytorch_tanh" ,A_ : List[str]=0.1 ,A_ : Optional[int]=0.1 ,A_ : List[str]=0.1 ,A_ : Tuple=1e-5 ,A_ : Optional[int]=0.02 ,A_ : List[str]=True ,A_ : Optional[Any]=True ,A_ : List[Any]=5_0256 ,A_ : Union[str, Any]=5_0256 ,A_ : int=True ,A_ : Optional[Any]=True ,A_ : Dict=True ,**A_ : Union[str, Any] ,) -> Union[str, Any]:
A = vocab_size
A = n_positions
A = n_embd
A = n_layer
A = n_head
A = n_inner
A = activation_function
A = resid_pdrop
A = embd_pdrop
A = attn_pdrop
A = layer_norm_epsilon
A = initializer_range
A = scale_attn_weights
A = use_cache
A = attention_softmax_in_fpaa
A = scale_attention_softmax_in_fpaa
A = multi_query
A = bos_token_id
A = eos_token_id
        super().__init__(bos_token_id=A_ ,eos_token_id=A_ ,**A_ )
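
# Editor's sketch: the attribute_map above lets GPT-2-style names resolve to
# the canonical ones. The class here is name-mangled, so this targets the
# equivalent upstream class, transformers' GPTBigCodeConfig.
if __name__ == "__main__":
    from transformers import GPTBigCodeConfig

    cfg = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
    print(cfg.hidden_size, cfg.num_hidden_layers)  # 128 2
    print(cfg.multi_query)                         # True (default)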
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = [False] * len(snake_case__ )
__A = []
queue.append(snake_case__ )
__A = True
while queue:
__A = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(snake_case__ )
__A = True
__A = u
return visited[t]
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = [-1] * (len(snake_case__ ))
__A = 0
while bfs(snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
__A = float('''Inf''' )
__A = sink
while s != source:
# Find the minimum value in select path
__A = min(snake_case__ , graph[parent[s]][s] )
__A = parent[s]
max_flow += path_flow
__A = sink
while v != source:
__A = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__A = parent[v]
return max_flow
lowercase_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
lowercase_ , lowercase_ = 0, 5
print(ford_fulkerson(graph, source, sink))
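# For the 6-node example above (the classic CLRS flow network) the printed
# max flow is 23. Editor's minimal sanity check, written against the intended
# un-mangled name, since bfs and ford_fulkerson share one mangled name above:
#
#   assert ford_fulkerson([[0, 7], [0, 0]], 0, 1) == 7  # one edge, capacity 7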
"""simple docstring"""
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_lowercase = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
_lowercase = '''sshleifer/student_marian_en_ro_6_1'''
_lowercase = '''sshleifer/tiny-mbart'''
@require_torch
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any]=False ,A_ : Optional[int]=None ,A_ : List[str]=True ,A_ : Tuple=True ,A_ : Union[str, Any]=True ,A_ : List[str]=True ,) -> Tuple:
A = self.run_trainer(
eval_steps=1 ,max_len=12 ,model_name=A_ ,num_train_epochs=1 ,distributed=A_ ,extra_args_str=A_ ,predict_with_generate=A_ ,do_train=A_ ,do_eval=A_ ,do_predict=A_ ,)
A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history
if not do_eval:
return
A = [log for log in logs if 'eval_loss' in log.keys()]
A = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
A = eval_metrics[-1]
assert isinstance(last_step_stats['eval_bleu'] ,A_ )
assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
self.run_seqaseq_quick(distributed=A_ )
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
self.run_seqaseq_quick(distributed=A_ )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp simple --fp16' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2' ,predict_with_generate=A_ )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
self.run_seqaseq_quick(
distributed=A_ ,extra_args_str='--sharded_ddp zero_dp_2 --fp16' ,predict_with_generate=A_ )
@require_apex
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=A_ ,extra_args_str='--fp16 --fp16_backend=apex' )
@parameterized.expand(['base', 'low', 'high', 'mixed'] )
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Dict ) -> List[str]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
A = {
# test with the default log_level - should be info and thus log info once
'base': {'extra_args_str': '', 'n_matches': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
}
A = experiments[experiment_id]
A = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
A = 'Running training'
with CaptureStderr() as cl:
self.run_seqaseq_quick(**A_ ,extra_args_str=data['extra_args_str'] )
A = len(re.findall(A_ ,cl.err ) )
self.assertEqual(A_ ,data['n_matches'] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
A = self.run_trainer(
eval_steps=2 ,max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=10 ,distributed=A_ ,)
# Check metrics
A = TrainerState.load_from_json(os.path.join(A_ ,'trainer_state.json' ) ).log_history
A = [log for log in logs if 'eval_loss' in log.keys()]
A = eval_metrics[0]
A = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['eval_bleu'] ,A_ )
# test if do_predict saves generations and metrics
A = os.listdir(A_ )
A = {os.path.basename(A_ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(A_ : str ) -> Tuple[int, float]:
A = '--skip_memory_metrics 0'
A = self.run_trainer(
max_len=128 ,model_name=A_ ,learning_rate=3e-4 ,num_train_epochs=1 ,optim=A_ ,distributed=A_ ,extra_args_str=A_ ,do_eval=A_ ,do_predict=A_ ,n_gpus_to_use=1 ,)
# Check metrics
A = TrainerState.load_from_json(Path(A_ ,'trainer_state.json' ) ).log_history
A = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 )
A = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 )
A = logs[0]['train_loss']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
A , A , A = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
A , A , A = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
A = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
A = gpu_peak_mem_orig + gpu_alloc_mem_orig
A = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
A = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
A = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
A_ ,A_ ,'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' ,)
self.assertGreater(
A_ ,A_ ,'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' ,)
self.assertEqual(
A_ ,A_ ,F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ,A_ : str ,A_ : int ,A_ : float = 3e-3 ,A_ : str = "adafactor" ,A_ : bool = False ,A_ : str = None ,A_ : int = 0 ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : bool = True ,A_ : int = None ,) -> Dict:
A = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
A = self.get_auto_remove_tmp_dir()
        A = F'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(A_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(A_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split()
        A = F'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(A_ )}\n '.split()
A = '\n --do_predict\n '.split()
A = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'--optim {optim}'.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
A = get_gpu_count()
A = get_torch_dist_unique_port()
A = F'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split()
A = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A_ ,env=self.get_env() )
else:
A = ['run_translation.py'] + args
with patch.object(A_ ,'argv' ,A_ ):
main()
        return output_dir
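
# Editor's note: this suite is meant to be run with pytest from a
# transformers checkout against the upstream (un-mangled) test file; the
# exact path is an assumption and may differ across versions, e.g.:
#
#   RUN_SLOW=1 pytest tests/extended/test_trainer_ext.py -k "seq2seq"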
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class a__ ( _lowercase ):
"""simple docstring"""
__lowerCamelCase = '''lilt'''
def __init__( self , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=2 , lowercase=0.02 , lowercase=1e-12 , lowercase=0 , lowercase="absolute" , lowercase=None , lowercase=4 , lowercase=1024 , **lowercase , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=A_ , **A_ )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = classifier_dropout
A__ = channel_shrink_ratio
A__ = max_ad_position_embeddings
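
# Editor's sketch: the class above is name-mangled, so this targets the
# equivalent upstream class, transformers' LiltConfig.
if __name__ == "__main__":
    from transformers import LiltConfig

    cfg = LiltConfig(channel_shrink_ratio=4)
    print(cfg.model_type)                  # lilt
    print(cfg.max_2d_position_embeddings)  # 1024 (default)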
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''deit'''
def __init__( self : int ,A_ : Optional[Any]=768 ,A_ : Union[str, Any]=12 ,A_ : Dict=12 ,A_ : int=3072 ,A_ : Optional[Any]="gelu" ,A_ : Dict=0.0 ,A_ : Any=0.0 ,A_ : str=0.02 ,A_ : Tuple=1e-12 ,A_ : Union[str, Any]=224 ,A_ : Optional[Any]=16 ,A_ : List[Any]=3 ,A_ : Optional[Any]=True ,A_ : Optional[int]=16 ,**A_ : Union[str, Any] ,) -> Dict:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = encoder_stride
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: int = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> float:
        return 1e-4
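
# Editor's sketch: the classes above are name-mangled, so this targets the
# equivalent upstream class, transformers' DeiTConfig; the ONNX config above
# then exposes the pixel_values input axes for export.
if __name__ == "__main__":
    from transformers import DeiTConfig

    cfg = DeiTConfig(image_size=192, patch_size=16)
    print(cfg.model_type, cfg.image_size)  # deit 192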
"""simple docstring"""
import argparse
import os
import re
import packaging.version
lowerCAmelCase = """examples/"""
lowerCAmelCase = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
lowerCAmelCase = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
lowerCAmelCase = """README.md"""
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : int , snake_case_ : Dict ) ->List[Any]:
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCamelCase__ : int =f.read()
lowerCamelCase__ , lowerCamelCase__ : List[Any] =REPLACE_PATTERNS[pattern]
lowerCamelCase__ : Union[str, Any] =replace.replace('VERSION' , snake_case__ )
lowerCamelCase__ : Optional[int] =re_pattern.sub(snake_case__ , snake_case__ )
with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(snake_case__ )
def lowerCAmelCase_ ( snake_case_ : Tuple ) ->Any:
for folder, directories, fnames in os.walk(snake_case__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(snake_case__ , snake_case__ ) , snake_case__ , pattern='examples' )
def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : List[Any]=False ) ->List[str]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(snake_case__ , snake_case__ , snake_case__ )
if not patch:
update_version_in_examples(snake_case__ )
def lowerCAmelCase_ ( ) ->Tuple:
lowerCamelCase__ : List[str] ='🤗 Transformers currently provides the following architectures'
lowerCamelCase__ : Optional[Any] ='1. Want to contribute a new model?'
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCamelCase__ : Any =f.readlines()
# Find the start of the list.
lowerCamelCase__ : List[str] =0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCamelCase__ : List[str] =start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
lowerCamelCase__ : Tuple =lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(snake_case__ )
def lowerCAmelCase_ ( ) ->Optional[int]:
with open(REPLACE_FILES['init'] , 'r' ) as f:
lowerCamelCase__ : int =f.read()
lowerCamelCase__ : Optional[Any] =REPLACE_PATTERNS['init'][0].search(snake_case__ ).groups()[0]
return packaging.version.parse(snake_case__ )
def lowerCAmelCase_ ( snake_case_ : Dict=False ) ->Optional[int]:
lowerCamelCase__ : List[str] =get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
lowerCamelCase__ : List[str] =default_version.base_version
elif patch:
lowerCamelCase__ : str =f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
lowerCamelCase__ : Any =f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
lowerCamelCase__ : str =input(f"""Which version are you releasing? [{default_version}]""" )
if len(snake_case__ ) == 0:
lowerCamelCase__ : List[Any] =default_version
print(f"""Updating version to {version}.""" )
global_version_update(snake_case__ , patch=snake_case__ )
def lowerCAmelCase_ ( ) ->str:
lowerCamelCase__ : int =get_version()
lowerCamelCase__ : str =f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
lowerCamelCase__ : Dict =current_version.base_version
# Check with the user we got that right.
lowerCamelCase__ : int =input(f"""Which version are we developing now? [{dev_version}]""" )
if len(snake_case__ ) == 0:
lowerCamelCase__ : Tuple =dev_version
print(f"""Updating version to {version}.""" )
global_version_update(snake_case__ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
lowerCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
        post_release_work()
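
# Editor's sketch (commented out; the module-level names above are mangled,
# so the 'init' pattern is restated inline): what one substitution does to a
# version line.
#
#   import re
#   pat = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
#   print(pat.sub('__version__ = "0.20.0"\n', '__version__ = "0.19.0.dev0"\n'))
#   # -> __version__ = "0.20.0"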
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[int]=0.999 , snake_case__ : Union[str, Any]="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ : Dict ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
A = []
for i in range(snake_case__ ):
A = i / num_diffusion_timesteps
A = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) )
return torch.tensor(snake_case__ , dtype=torch.floataa )
class lowerCAmelCase_ ( _lowercase , _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
_lowerCamelCase: Optional[Any] = 2
@register_to_config
def __init__( self : str ,A_ : int = 1000 ,A_ : float = 0.0_00_85 ,A_ : float = 0.0_12 ,A_ : str = "linear" ,A_ : Optional[Union[np.ndarray, List[float]]] = None ,A_ : str = "epsilon" ,A_ : Optional[bool] = False ,A_ : Optional[bool] = False ,A_ : float = 1.0 ,A_ : str = "linspace" ,A_ : int = 0 ,) -> List[str]:
if trained_betas is not None:
A = torch.tensor(A_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
A = torch.linspace(A_ ,A_ ,A_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,A_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A = betas_for_alpha_bar(A_ ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
A = betas_for_alpha_bar(A_ ,alpha_transform_type='exp' )
else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}' )
A = 1.0 - self.betas
A = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(A_ ,A_ ,A_ )
A = use_karras_sigmas
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : Tuple=None ) -> Tuple:
if schedule_timesteps is None:
A = self.timesteps
A = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A = 1 if len(A_ ) > 1 else 0
else:
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
A = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : torch.FloatTensor ,A_ : Union[float, torch.FloatTensor] ,) -> torch.FloatTensor:
A = self.index_for_timestep(A_ )
A = self.sigmas[step_index]
A = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : Union[str, torch.device] = None ,A_ : Optional[int] = None ,) -> Optional[Any]:
A = num_inference_steps
A = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A = np.linspace(0 ,num_train_timesteps - 1 ,A_ ,dtype=A_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(0 ,A_ ) * step_ratio).round()[::-1].copy().astype(A_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(A_ ,0 ,-step_ratio )).round().copy().astype(A_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
A = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A = np.log(A_ )
A = np.interp(A_ ,np.arange(0 ,len(A_ ) ) ,A_ )
if self.config.use_karras_sigmas:
A = self._convert_to_karras(in_sigmas=A_ ,num_inference_steps=self.num_inference_steps )
A = np.array([self._sigma_to_t(A_ ,A_ ) for sigma in sigmas] )
A = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
A = torch.from_numpy(A_ ).to(device=A_ )
A = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A = torch.from_numpy(A_ )
A = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(A_ ).startswith('mps' ):
# mps does not support float64
A = timesteps.to(A_ ,dtype=torch.floataa )
else:
A = timesteps.to(device=A_ )
# empty dt and derivative
A = None
A = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A = defaultdict(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : List[str] ) -> Dict:
# get log sigma
A = np.log(A_ )
# get distribution
A = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A = low_idx + 1
A = log_sigmas[low_idx]
A = log_sigmas[high_idx]
# interpolate sigmas
A = (low - log_sigma) / (low - high)
A = np.clip(A_ ,0 ,1 )
# transform interpolation to time range
A = (1 - w) * low_idx + w * high_idx
A = t.reshape(sigma.shape )
return t
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : torch.FloatTensor ,A_ : int ) -> torch.FloatTensor:
A = in_sigmas[-1].item()
A = in_sigmas[0].item()
A = 7.0 # 7.0 is the value used in the paper
A = np.linspace(0 ,1 ,A_ )
A = sigma_min ** (1 / rho)
A = sigma_max ** (1 / rho)
A = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return self.dt is None
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : Union[float, torch.FloatTensor] ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : bool = True ,) -> Union[SchedulerOutput, Tuple]:
A = self.index_for_timestep(A_ )
# advance index counter by 1
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A = self.sigmas[step_index]
A = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A = self.sigmas[step_index - 1]
A = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A = 0
A = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A = sigma_hat if self.state_in_first_order else sigma_next
A = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A = sigma_hat if self.state_in_first_order else sigma_next
A = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A = model_output
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`' )
if self.config.clip_sample:
A = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A = sigma_next - sigma_hat
# store for 2nd order step
A = derivative
A = dt
A = sample
else:
# 2. 2nd order / Heun's method
A = (sample - pred_original_sample) / sigma_next
A = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A = self.dt
A = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A = None
A = None
A = None
A = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(A_ ):
# mps does not support float64
A = self.timesteps.to(original_samples.device ,dtype=torch.float32 )
A = timesteps.to(original_samples.device ,dtype=torch.float32 )
else:
A = self.timesteps.to(original_samples.device )
A = timesteps.to(original_samples.device )
A = [self.index_for_timestep(A_ ,A_ ) for t in timesteps]
A = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A = sigma.unsqueeze(-1 )
A = original_samples + noise * sigma
return noisy_samples
def __len__( self : Dict ) -> int:
return self.config.num_train_timesteps | 74 | 0 |
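# A standalone sketch of the Karras et al. (2022) sigma schedule that the
# `_convert_to_karras` method above implements; the function name here is
# illustrative, not part of the original file.
import numpy as np

def karras_sigmas(sigma_min: float, sigma_max: float, num_steps: int, rho: float = 7.0) -> np.ndarray:
    # interpolate linearly in rho-space, then map back:
    # ramp=0 gives sigma_max, ramp=1 gives sigma_min (a descending noise schedule)
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

# karras_sigmas(0.1, 10.0, 5) -> five noise levels from 10.0 down to 0.1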
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowerCamelCase = logging.get_logger(__name__)
class __magic_name__ ( _lowercase ):
'''simple docstring'''
def __init__( self, *lowercase_, **lowercase_ ) -> None:
"""simple docstring"""
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''', A_, )
super().__init__(*A_, **A_ )
| 188 |
"""simple docstring"""
class PrefixSum:
    """Prefix-sum array with O(1) range-sum queries."""

    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end], inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Whether some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 74 | 0 |
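# Quick usage sketch for the PrefixSum class above (names as restored):
ps = PrefixSum([1, 2, 3, 4])
assert ps.get_sum(0, 3) == 10      # 1 + 2 + 3 + 4
assert ps.get_sum(1, 2) == 5       # 2 + 3
assert ps.contains_sum(7)          # the subarray [3, 4] sums to 7
assert not ps.contains_sum(100)    # no contiguous subarray sums to 100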
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase : str = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
UpperCAmelCase : int = {'mobilebert-uncased': 512}
UpperCAmelCase : List[Any] = {}
class lowerCAmelCase__ ( _lowercase ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = MobileBertTokenizer
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict="[UNK]" , __SCREAMING_SNAKE_CASE : Any="[SEP]" , __SCREAMING_SNAKE_CASE : str="[PAD]" , __SCREAMING_SNAKE_CASE : Optional[Any]="[CLS]" , __SCREAMING_SNAKE_CASE : str="[MASK]" , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Dict=None , **__SCREAMING_SNAKE_CASE : Any , ) -> Dict:
"""simple docstring"""
super().__init__(
A_ , tokenizer_file=A_ , do_lower_case=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , tokenize_chinese_chars=A_ , strip_accents=A_ , **A_ , )
__SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , A_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , A_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , A_ ) != tokenize_chinese_chars
):
__SCREAMING_SNAKE_CASE = getattr(A_ , normalizer_state.pop("""type""" ) )
__SCREAMING_SNAKE_CASE = do_lower_case
__SCREAMING_SNAKE_CASE = strip_accents
__SCREAMING_SNAKE_CASE = tokenize_chinese_chars
__SCREAMING_SNAKE_CASE = normalizer_class(**A_ )
__SCREAMING_SNAKE_CASE = do_lower_case
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=None ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
| 267 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM']
    )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin'))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.'):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.'):]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 74 | 0 |
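# A toy illustration of the key-renaming rule in the conversion above; the
# tensor keys and values here are made up, only the rule itself is real.
original = {
    "roberta.embeddings.word_embeddings.weight": "kept",
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": "dropped",
    "lm_head.bias": "kept",
}
converted = {}
for key, value in original.items():
    if key.startswith("roberta."):
        key = "roberta_prelayernorm." + key[len("roberta.") :]
    if key.endswith(".self.LayerNorm.weight") or key.endswith(".self.LayerNorm.bias"):
        continue  # unused in the pre-layer-norm model
    converted[key] = value
assert sorted(converted) == ["lm_head.bias", "roberta_prelayernorm.embeddings.word_embeddings.weight"]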
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
"""simple docstring"""
def __init__( self : Optional[Any] ,snake_case : Optional[Any] ,snake_case : Optional[Any]=13 ,snake_case : List[Any]=30 ,snake_case : Optional[Any]=2 ,snake_case : Optional[int]=3 ,snake_case : int=True ,snake_case : Optional[Any]=True ,snake_case : List[str]=32 ,snake_case : str=2 ,snake_case : str=4 ,snake_case : int=37 ,snake_case : Tuple="gelu" ,snake_case : Any=0.1 ,snake_case : int=0.1 ,snake_case : str=10 ,snake_case : List[str]=0.02 ,snake_case : int=3 ,snake_case : List[Any]=None ,snake_case : int=2 ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =image_size
SCREAMING_SNAKE_CASE =patch_size
SCREAMING_SNAKE_CASE =num_channels
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =type_sequence_label_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =scope
SCREAMING_SNAKE_CASE =encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE =num_patches + 2
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self : Optional[Any] ):
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A_ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def _lowerCAmelCase ( self : Dict ,snake_case : Any ,snake_case : Any ,snake_case : int ):
SCREAMING_SNAKE_CASE =TFDeiTModel(config=A_ )
SCREAMING_SNAKE_CASE =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Tuple ,snake_case : List[str] ,snake_case : List[str] ,snake_case : Optional[Any] ):
SCREAMING_SNAKE_CASE =TFDeiTForMaskedImageModeling(config=A_ )
SCREAMING_SNAKE_CASE =model(A_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE =1
SCREAMING_SNAKE_CASE =TFDeiTForMaskedImageModeling(A_ )
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =model(A_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Union[str, Any] ,snake_case : str ,snake_case : Tuple ):
SCREAMING_SNAKE_CASE =self.type_sequence_label_size
SCREAMING_SNAKE_CASE =TFDeiTForImageClassification(A_ )
SCREAMING_SNAKE_CASE =model(A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE =1
SCREAMING_SNAKE_CASE =TFDeiTForImageClassification(A_ )
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =model(A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =config_and_inputs
SCREAMING_SNAKE_CASE ={'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__UpperCAmelCase = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =TFDeiTModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ,hidden_size=37 )
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def _lowerCAmelCase ( self : Union[str, Any] ):
pass
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
SCREAMING_SNAKE_CASE =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ ,tf.keras.layers.Dense ) )
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(A_ )
SCREAMING_SNAKE_CASE =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE =['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _lowerCAmelCase ( self : Dict ,snake_case : List[str] ,snake_case : Any ,snake_case : List[Any]=False ):
SCREAMING_SNAKE_CASE =super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _lowerCAmelCase ( self : List[Any] ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =TFDeiTModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : Tuple ):
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=A_ ,return_tensors='tf' )
# forward pass
SCREAMING_SNAKE_CASE =model(**A_ )
# verify the logits
SCREAMING_SNAKE_CASE =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,A_ )
SCREAMING_SNAKE_CASE =tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,A_ ,atol=1e-4 ) )
| 334 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = '''roformer'''
def __init__( self : Tuple ,A_ : Optional[int]=5_0000 ,A_ : Tuple=None ,A_ : Optional[Any]=768 ,A_ : Dict=12 ,A_ : Optional[int]=12 ,A_ : Union[str, Any]=3072 ,A_ : Dict="gelu" ,A_ : Dict=0.1 ,A_ : List[Any]=0.1 ,A_ : List[Any]=1536 ,A_ : List[str]=2 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : Optional[int]=0 ,A_ : List[str]=False ,A_ : Tuple=True ,**A_ : List[str] ,) -> Dict:
super().__init__(pad_token_id=A_ ,**A_ )
A = vocab_size
A = hidden_size if embedding_size is None else embedding_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = rotary_value
A = use_cache
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 74 | 0 |
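# A self-contained sketch of how dynamic axes like the mapping above are
# consumed by a plain torch.onnx.export call; TinyModel and the file name are
# placeholders, and the real RoFormer export goes through transformers.onnx.
import torch
from torch import nn

class TinyModel(nn.Module):
    def forward(self, input_ids, attention_mask):
        return (input_ids * attention_mask).float()

ids = torch.ones(1, 8, dtype=torch.long)
mask = torch.ones(1, 8, dtype=torch.long)
torch.onnx.export(
    TinyModel(), (ids, mask), "tiny.onnx",
    input_names=["input_ids", "attention_mask"],
    output_names=["output"],
    dynamic_axes={
        "input_ids": {0: "batch", 1: "sequence"},
        "attention_mask": {0: "batch", 1: "sequence"},
    },
)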
"""simple docstring"""
__lowerCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
A__ = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(snake_case__ )
A__ = ''.join(bin(snake_case__ )[2:].zfill(8 ) for byte in data )
A__ = len(snake_case__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
A__ = b'=' * ((6 - len(snake_case__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(snake_case__ ) % 6)
else:
A__ = b''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(snake_case__ ) , 6 ) ).encode()
+ padding
)
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ) and not isinstance(snake_case__ , snake_case__ ):
A__ = (
'argument should be a bytes-like object or ASCII string, '
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(snake_case__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(snake_case__ , snake_case__ ):
try:
A__ = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
A__ = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(snake_case__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
A__ = encoded_data[:-padding]
A__ = ''.join(
bin(B64_CHARSET.index(snake_case__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
A__ = ''.join(
bin(B64_CHARSET.index(snake_case__ ) )[2:].zfill(6 ) for char in encoded_data )
A__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(snake_case__ ) , 8 )
]
return bytes(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 221 |
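# Sanity check of the functions above (names as restored) against the
# standard library:
import base64 as stdlib_base64

sample = b"Hello!"
encoded = base64_encode(sample)
assert encoded == stdlib_base64.b64encode(sample) == b"SGVsbG8h"
assert base64_decode(encoded) == sample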
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path='facebook/mbart-large-en-ro', finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = 'relu'
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
_lowercase = parser.parse_args()
model = convert_fairseq_mbart_checkpoint_from_disk(
    args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path) | 74 | 0 |
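# What make_linear_from_emb above does: build an output projection that shares
# (not copies) the embedding matrix, so logits are dot products with the
# embedding rows. A small check, assuming the restored helper is in scope:
import torch
from torch import nn

emb = nn.Embedding(10, 4)
lm_head = make_linear_from_emb(emb)
x = torch.randn(2, 4)
assert torch.allclose(lm_head(x), x @ emb.weight.t())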
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def lowercase ( _snake_case : Union[str, Any] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : Tuple ) ->int:
"""simple docstring"""
__snake_case : Optional[Any] = s.rsplit(snake_case__ , snake_case__ )
return new.join(snake_case__ )
def lowercase ( _snake_case : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def lowercase ( _snake_case : Union[str, Any] ) ->List[str]:
"""simple docstring"""
__snake_case : str = {}
__snake_case : Tuple = ['''group_1''', '''group_2''', '''group_3''', '''group_4''']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__snake_case : Optional[Any] = key.replace(f"""{group_key}.""" , f"""{group_key}.group.""" )
if "res_path" in key:
__snake_case : str = key.replace('''res_path.''' , '''res_path.path.''' )
if key.endswith('''.w''' ):
__snake_case : str = rreplace(snake_case__ , '''.w''' , '''.weight''' , 1 )
if key.endswith('''.b''' ):
__snake_case : Optional[Any] = rreplace(snake_case__ , '''.b''' , '''.bias''' , 1 )
__snake_case : str = value.float()
return upgrade
@torch.no_grad()
def lowercase ( _snake_case : List[Any] , _snake_case : int , _snake_case : Tuple=None , _snake_case : str=True ) ->int:
"""simple docstring"""
from dall_e import Encoder
__snake_case : int = Encoder()
if os.path.exists(snake_case__ ):
__snake_case : List[str] = torch.load(snake_case__ )
else:
__snake_case : Tuple = torch.hub.load_state_dict_from_url(snake_case__ )
if isinstance(snake_case__ , snake_case__ ):
__snake_case : List[Any] = ckpt.state_dict()
encoder.load_state_dict(snake_case__ )
if config_path is not None:
__snake_case : List[Any] = FlavaImageCodebookConfig.from_pretrained(snake_case__ )
else:
__snake_case : Dict = FlavaImageCodebookConfig()
__snake_case : List[str] = FlavaImageCodebook(snake_case__ ).eval()
__snake_case : List[Any] = encoder.state_dict()
__snake_case : Dict = upgrade_state_dict(snake_case__ )
hf_model.load_state_dict(snake_case__ )
__snake_case : List[Any] = hf_model.state_dict()
__snake_case : List[Any] = count_parameters(snake_case__ )
__snake_case : Dict = count_parameters(snake_case__ )
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(snake_case__ )
else:
return hf_state_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 102 |
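# The helper used above as `rreplace` is an rsplit-based "replace from the
# right": split off at most `occurrence` matches from the right, then rejoin
# with the replacement. Restated compactly (signature names are restorations):
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    return new.join(s.rsplit(old, occurrence))

assert rreplace("blocks.w.w", ".w", ".weight", 1) == "blocks.w.weight"
assert rreplace("a.b.a.b", ".b", ".c", 2) == "a.c.a.c"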
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    '''Class to contain the entire pipeline for the SHA-256 hashing algorithm.'''

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6_A_0_9_E_6_6_7,
0XB_B_6_7_A_E_8_5,
0X3_C_6_E_F_3_7_2,
0XA_5_4_F_F_5_3_A,
0X5_1_0_E_5_2_7_F,
0X9_B_0_5_6_8_8_C,
0X1_F_8_3_D_9_A_B,
0X5_B_E_0_C_D_1_9,
]
# Initialize round constants
self.round_constants = [
0X4_2_8_A_2_F_9_8,
0X7_1_3_7_4_4_9_1,
0XB_5_C_0_F_B_C_F,
0XE_9_B_5_D_B_A_5,
0X3_9_5_6_C_2_5_B,
0X5_9_F_1_1_1_F_1,
0X9_2_3_F_8_2_A_4,
0XA_B_1_C_5_E_D_5,
0XD_8_0_7_A_A_9_8,
0X1_2_8_3_5_B_0_1,
0X2_4_3_1_8_5_B_E,
0X5_5_0_C_7_D_C_3,
0X7_2_B_E_5_D_7_4,
0X8_0_D_E_B_1_F_E,
0X9_B_D_C_0_6_A_7,
0XC_1_9_B_F_1_7_4,
0XE_4_9_B_6_9_C_1,
0XE_F_B_E_4_7_8_6,
0X0_F_C_1_9_D_C_6,
0X2_4_0_C_A_1_C_C,
0X2_D_E_9_2_C_6_F,
0X4_A_7_4_8_4_A_A,
0X5_C_B_0_A_9_D_C,
0X7_6_F_9_8_8_D_A,
0X9_8_3_E_5_1_5_2,
0XA_8_3_1_C_6_6_D,
0XB_0_0_3_2_7_C_8,
0XB_F_5_9_7_F_C_7,
0XC_6_E_0_0_B_F_3,
0XD_5_A_7_9_1_4_7,
0X0_6_C_A_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_B_7_0_A_8_5,
0X2_E_1_B_2_1_3_8,
0X4_D_2_C_6_D_F_C,
0X5_3_3_8_0_D_1_3,
0X6_5_0_A_7_3_5_4,
0X7_6_6_A_0_A_B_B,
0X8_1_C_2_C_9_2_E,
0X9_2_7_2_2_C_8_5,
0XA_2_B_F_E_8_A_1,
0XA_8_1_A_6_6_4_B,
0XC_2_4_B_8_B_7_0,
0XC_7_6_C_5_1_A_3,
0XD_1_9_2_E_8_1_9,
0XD_6_9_9_0_6_2_4,
0XF_4_0_E_3_5_8_5,
0X1_0_6_A_A_0_7_0,
0X1_9_A_4_C_1_1_6,
0X1_E_3_7_6_C_0_8,
0X2_7_4_8_7_7_4_C,
0X3_4_B_0_B_C_B_5,
0X3_9_1_C_0_C_B_3,
0X4_E_D_8_A_A_4_A,
0X5_B_9_C_C_A_4_F,
0X6_8_2_E_6_F_F_3,
0X7_4_8_F_8_2_E_E,
0X7_8_A_5_6_3_6_F,
0X8_4_C_8_7_8_1_4,
0X8_C_C_7_0_2_0_8,
0X9_0_B_E_F_F_F_A,
0XA_4_5_0_6_C_E_B,
0XB_E_F_9_A_3_F_7,
0XC_6_7_1_7_8_F_2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b'\x80' + (b'\x00' * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack('>Q', (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L', block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = ''.join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    '''Test class for the SHA256 class.'''

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes('Test String', 'utf-8')
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s', '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string', )
    parser.add_argument(
        '-f', '--file', dest='input_file', help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main() | 74 | 0 |
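# Quick check of the SHA256 class above (names as restored) against hashlib:
import hashlib

payload = b"abc"
assert SHA256(payload).hash == hashlib.sha256(payload).hexdigest()
# both equal "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"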
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowerCamelCase_ ( _lowercase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Any = tempfile.mkdtemp()
__UpperCamelCase :List[str] = 8
# DPR tok
__UpperCamelCase :List[str] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__UpperCamelCase :Tuple = os.path.join(self.tmpdirname , '''dpr_tokenizer''')
os.makedirs(A_ , exist_ok=A_)
__UpperCamelCase :Dict = os.path.join(A_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
# BART tok
__UpperCamelCase :int = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__UpperCamelCase :Tuple = dict(zip(A_ , range(len(A_))))
__UpperCamelCase :List[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__UpperCamelCase :Optional[int] = {'''unk_token''': '''<unk>'''}
__UpperCamelCase :Tuple = os.path.join(self.tmpdirname , '''bart_tokenizer''')
os.makedirs(A_ , exist_ok=A_)
__UpperCamelCase :Tuple = os.path.join(A_ , BART_VOCAB_FILES_NAMES['''vocab_file'''])
__UpperCamelCase :Tuple = os.path.join(A_ , BART_VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(A_) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(A_))
def UpperCamelCase__ ( self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer'''))
def UpperCamelCase__ ( self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer'''))
def UpperCamelCase__ ( self) -> Optional[int]:
shutil.rmtree(self.tmpdirname)
@require_tokenizers
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Union[str, Any] = os.path.join(self.tmpdirname , '''rag_tokenizer''')
__UpperCamelCase :int = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict())
__UpperCamelCase :Optional[int] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer())
rag_config.save_pretrained(A_)
rag_tokenizer.save_pretrained(A_)
__UpperCamelCase :str = RagTokenizer.from_pretrained(A_ , config=A_)
self.assertIsInstance(new_rag_tokenizer.question_encoder , A_)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator , A_)
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab())
@slow
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :str = RagTokenizer.from_pretrained('''facebook/rag-token-nq''')
__UpperCamelCase :Any = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
__UpperCamelCase :str = tokenizer(A_)
self.assertIsNotNone(A_)
@slow
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :int = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''')
__UpperCamelCase :Any = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
__UpperCamelCase :Tuple = tokenizer(A_)
self.assertIsNotNone(A_)
| 43 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowercase = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''DeiTFeatureExtractor''']
_lowercase = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 74 | 0 |
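# The _LazyModule indirection above defers heavy imports until an attribute is
# first accessed. A minimal sketch of the idea only; the real transformers
# class also handles sys.modules bookkeeping and module-level attributes:
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the module that provides it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

# Demo with stdlib modules standing in for heavy submodules:
lazy = LazyModule("demo", {"math": ["sqrt"], "json": ["dumps"]})
assert lazy.sqrt(9) == 3.0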
from collections import deque
from math import floor
from random import random
from time import time
class __magic_name__ :
def __init__( self : List[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ : Tuple = {}
def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int=1 ) -> Optional[Any]:
'''simple docstring'''
if self.graph.get(A_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
UpperCamelCase__ : List[str] = [[w, v]]
if not self.graph.get(A_ ):
UpperCamelCase__ : Any = []
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return list(self.graph )
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict ) -> Any:
'''simple docstring'''
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
def UpperCAmelCase__ ( self : str , lowerCamelCase__ : int=-2 , lowerCamelCase__ : Dict=-1 ) -> Any:
'''simple docstring'''
if s == d:
return []
UpperCamelCase__ : Any = []
UpperCamelCase__ : List[Any] = []
if s == -2:
UpperCamelCase__ : Tuple = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
UpperCamelCase__ : List[Any] = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
UpperCamelCase__ : Union[str, Any] = stack[len(A_ ) - 1]
else:
UpperCamelCase__ : str = ss
# check if we have reached the starting point
if len(A_ ) == 0:
return visited
def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : List[str]=-1 ) -> Optional[int]:
'''simple docstring'''
if c == -1:
UpperCamelCase__ : str = floor(random() * 10000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCamelCase__ : int = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ , A_ , 1 )
def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : List[str]=-2 ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Tuple = deque()
UpperCamelCase__ : Optional[int] = []
if s == -2:
UpperCamelCase__ : int = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
UpperCamelCase__ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : int = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : List[str] ) -> List[str]:
'''simple docstring'''
return len(self.graph[u] )
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : Optional[Any] = []
if s == -2:
UpperCamelCase__ : Optional[int] = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
UpperCamelCase__ : Any = s
UpperCamelCase__ : List[Any] = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(A_ ) != 0:
UpperCamelCase__ : Optional[int] = stack[len(A_ ) - 1]
else:
UpperCamelCase__ : Dict = ss
# check if we have reached the starting point
if len(A_ ) == 0:
return sorted_nodes
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Tuple = []
UpperCamelCase__ : int = []
UpperCamelCase__ : Tuple = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
UpperCamelCase__ : List[str] = -2
UpperCamelCase__ : List[str] = []
UpperCamelCase__ : List[str] = s
UpperCamelCase__ : Dict = False
UpperCamelCase__ : Optional[Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ : Any = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ : Any = True
if len(A_ ) != 0:
UpperCamelCase__ : Tuple = stack[len(A_ ) - 1]
else:
UpperCamelCase__ : Dict = False
indirect_parents.append(A_ )
UpperCamelCase__ : Union[str, Any] = s
UpperCamelCase__ : Optional[int] = ss
# check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : List[str] = []
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ : Dict = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
UpperCamelCase__ : List[str] = -2
UpperCamelCase__ : List[str] = []
UpperCamelCase__ : Any = s
UpperCamelCase__ : Tuple = False
UpperCamelCase__ : List[str] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ : Union[str, Any] = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ : List[Any] = True
if len(A_ ) != 0:
UpperCamelCase__ : Union[str, Any] = stack[len(A_ ) - 1]
else:
UpperCamelCase__ : int = False
indirect_parents.append(A_ )
UpperCamelCase__ : Union[str, Any] = s
UpperCamelCase__ : Tuple = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return False
def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[Any]=-2 , lowerCamelCase__ : Union[str, Any]=-1 ) -> str:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = time()
self.dfs(A_ , A_ )
UpperCamelCase__ : Optional[int] = time()
return end - begin
def UpperCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : str=-2 ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = time()
self.bfs(A_ )
UpperCamelCase__ : int = time()
return end - begin
class __magic_name__ :
def __init__( self : int ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : Any = {}
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int=1 ) -> str:
'''simple docstring'''
if self.graph.get(A_ ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
UpperCamelCase__ : Optional[Any] = [[w, v]]
# add the other way
if self.graph.get(A_ ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
UpperCamelCase__ : Optional[int] = [[w, u]]
def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : Dict , lowerCamelCase__ : int ) -> int:
'''simple docstring'''
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
# the other way round
if self.graph.get(A_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(A_ )
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Any=-2 , lowerCamelCase__ : Optional[Any]=-1 ) -> Tuple:
'''simple docstring'''
if s == d:
return []
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : List[Any] = []
if s == -2:
UpperCamelCase__ : Dict = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
UpperCamelCase__ : int = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
UpperCamelCase__ : Optional[Any] = stack[len(A_ ) - 1]
else:
UpperCamelCase__ : Union[str, Any] = ss
# check if we have reached the starting point
if len(A_ ) == 0:
return visited
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Dict=-1 ) -> Tuple:
'''simple docstring'''
if c == -1:
UpperCamelCase__ : List[str] = floor(random() * 10000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCamelCase__ : Union[str, Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ , A_ , 1 )
def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Union[str, Any]=-2 ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Tuple = deque()
UpperCamelCase__ : Any = []
if s == -2:
UpperCamelCase__ : List[Any] = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
UpperCamelCase__ : Tuple = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : Tuple ) -> Any:
'''simple docstring'''
return len(self.graph[u] )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : Tuple = []
UpperCamelCase__ : Tuple = []
UpperCamelCase__ : str = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
UpperCamelCase__ : Union[str, Any] = -2
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Optional[int] = s
UpperCamelCase__ : Optional[int] = False
UpperCamelCase__ : Union[str, Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ : Optional[int] = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ : Union[str, Any] = True
if len(A_ ) != 0:
UpperCamelCase__ : Dict = stack[len(A_ ) - 1]
else:
UpperCamelCase__ : Any = False
indirect_parents.append(A_ )
UpperCamelCase__ : str = s
UpperCamelCase__ : Any = ss
# check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : Any = []
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : Optional[int] = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
UpperCamelCase__ : Union[str, Any] = -2
UpperCamelCase__ : List[str] = []
UpperCamelCase__ : Optional[int] = s
UpperCamelCase__ : int = False
UpperCamelCase__ : Dict = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : int = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ : Tuple = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ : Tuple = True
if len(A_ ) != 0:
UpperCamelCase__ : Union[str, Any] = stack[len(A_ ) - 1]
else:
UpperCamelCase__ : Tuple = False
indirect_parents.append(A_ )
UpperCamelCase__ : List[Any] = s
UpperCamelCase__ : List[str] = ss
# check if we have reached the starting point
if len(A_ ) == 0:
return False
def UpperCAmelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
return list(self.graph )
def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple=-2 , lowerCamelCase__ : Any=-1 ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = time()
self.dfs(A_ , A_ )
UpperCamelCase__ : Union[str, Any] = time()
return end - begin
def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Union[str, Any]=-2 ) -> str:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = time()
self.bfs(A_ )
UpperCamelCase__ : List[Any] = time()
return end - begin
| 146 |
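# Usage sketch for the first (directed) graph class above; its class name was
# lost in this dump, so DirectedGraph is a stand-in, while add_pair/dfs/bfs
# follow the internal call sites:
g = DirectedGraph()
g.add_pair(0, 1)
g.add_pair(1, 2)
g.add_pair(2, 0)                  # closes the cycle 0 -> 1 -> 2 -> 0
assert g.dfs(0, 2) == [0, 1, 2]   # DFS path from 0 to 2
assert g.bfs(0) == [0, 1, 2]      # BFS order from 0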
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews."""
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join('* [{title}]({url})'.format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown()) | 74 | 0 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 346 |
"""simple docstring"""
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message with the generated key."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            encrypted += dict2[x]
    return encrypted


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt the cipher text back to the original message."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main() | 74 | 0 |
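# Worked example for the cipher above: the key is cycled to the message
# length, then each letter is shifted down by the key letter (mod 26).
key_new = generate_key("HELLO", "KEY")
assert key_new == "KEYKE"
c = cipher_text("HELLO", key_new)
assert c == "XANBK"   # H-K=X, E-E=A, L-Y=N, L-K=B, O-E=K (all mod 26)
assert original_text(c, key_new) == "HELLO"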
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
lowercase_ = input('Enter image url: ').strip()
print(F'''Downloading image from {url} ...''')
lowercase_ = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
lowercase_ = soup.find('meta', {'property': 'og:image'})['content']
lowercase_ = requests.get(image_url).content
lowercase_ = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(F'''Done. Image saved to disk as {file_name}.''')
| 266 |
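soup.find returns None when a page carries no og:image meta tag, in which case the subscript above raises a TypeError; a slightly more defensive lookup with the same bs4 API might read:

meta = soup.find("meta", {"property": "og:image"})
if meta is None or not meta.get("content"):
    raise SystemExit("No og:image meta tag found on the page.")
image_url = meta["content"]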
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists()) | 74 | 0 |
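The same benchmark utilities can be driven outside of unittest; a minimal sketch mirroring the arguments exercised in the tests above:

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)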
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield every .py / .ipynb file under top_dir, skipping scripts and hidden dirs."""
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
| 68 |
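For a tree containing src/math/gcd.py and src/strings/reverse.py, print_directory_md would emit roughly the following (illustrative paths; the exact indent width follows md_prefix):

## Src
  * Math
    * [Gcd](src/math/gcd.py)
  * Strings
    * [Reverse](src/strings/reverse.py)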
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_lowercase = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: str
_lowerCamelCase: Optional[str] = None
_lowerCamelCase: Optional[Union[str, int]] = None
_lowerCamelCase: Optional[Union[str, int]] = None
_lowerCamelCase: Optional[Union[str, int]] = None
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
A , A , A = _str_to_version_tuple(self.version_str )
def __repr__( self : Optional[int] ) -> Dict:
return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return self.major, self.minor, self.patch
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple ) -> Union[str, Any]:
if isinstance(A_ ,A_ ):
return Version(A_ )
elif isinstance(A_ ,A_ ):
return other
raise TypeError(F'{other} (type {type(A_ )}) cannot be compared to version.' )
def __eq__( self : List[Any] ,A_ : Dict ) -> Any:
try:
A = self._validate_operand(A_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : List[Any] ,A_ : Optional[int] ) -> Tuple:
A = self._validate_operand(A_ )
return self.tuple < other.tuple
def __hash__( self : Union[str, Any] ) -> Union[str, Any]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,A_ : List[str] ) -> List[str]:
A = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.version_str
def _snake_case ( snake_case__ : List[str] ):
A = _VERSION_REG.match(snake_case__ )
if not res:
raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
return tuple(int(snake_case__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def _snake_case ( snake_case__ : str ):
return ".".join(str(snake_case__ ) for v in version_tuple ) | 74 | 0 |
"""simple docstring"""
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) ->Optional[Any]:
lowerCamelCase__ : List[Any] =(num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def lowerCAmelCase_ ( ) ->Union[str, Any]:
print(sum_of_series(1 , 1 , 1_0 ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 126 |
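Checking the closed form S_n = (n / 2) * (2a + (n - 1)d) by hand for two calls:

assert sum_of_series(1, 1, 10) == 55.0       # 5 * (2*1 + 9*1)
assert sum_of_series(1, 10, 100) == 49600.0  # 50 * (2*1 + 99*10)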
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    """ArgumentParser subclass that uses type hints on dataclasses to generate arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]
            kwargs["type"] = make_choice_type_function(kwargs["choices"])
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs) | 74 | 0 |
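A minimal end-to-end sketch of the parser above (the dataclass and its fields are made up for illustration):

from dataclasses import dataclass, field


@dataclass
class TrainingConfig:
    learning_rate: float = field(default=3e-4, metadata={"help": "Optimizer step size."})
    epochs: int = 3
    fp16: bool = False


parser = HfArgumentParser(TrainingConfig)
(config,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--epochs", "5"])
print(config.learning_rate, config.epochs, config.fp16)  # 0.0001 5 False

Booleans get the string_to_bool hack plus an auto-generated --no_<name> complement when their default is True, which is why passing --fp16 with no value flips the flag on.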