import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Second-order (KDPM2-style) discrete scheduler with interpolated sigmas."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012,
                 beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
                 prediction_type: str = "epsilon", timestep_spacing: str = "linspace", steps_offset: int = 0):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        """Scale the model input by `1 / sqrt(sigma**2 + 1)` for algorithm consistency."""
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: Union[float, torch.FloatTensor], sample: Union[torch.FloatTensor, np.ndarray], return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
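# ---------------------------------------------------------------------------
# Illustrative usage sketch (not from the original file). It assumes the
# diffusers-style API defined above; `denoiser` is a hypothetical stand-in
# for a real noise-prediction network such as a UNet.
#
#   scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(num_inference_steps=25)
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = denoiser(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample
#
# Because the scheduler is second order (order = 2), `timesteps` interleaves
# first- and second-order entries, so the denoiser runs roughly twice per
# inference step.
# ---------------------------------------------------------------------------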
def sum_of_digits(n: int) -> int:
    """Iteratively sum the decimal digits of ``n``.

    >>> sum_of_digits(12345)
    15
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the decimal digits of ``n``."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Sum the decimal digits of ``n`` via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time the three implementations on progressively larger inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
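# Illustrative cross-check (not part of the original): the three
# implementations agree on every input, including zero and negatives.
if __name__ == "__main__":
    for n in (0, 7, -7, 12345, 10**18 - 1):
        assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)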
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
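# Illustrative check (not part of the original): because neighbours are pushed
# in reversed order, the stack pops them in listed order, so the visit order
# from "A" in G above is A, B, D, E, F, C, G; every vertex is reached.
if __name__ == "__main__":
    assert depth_first_search(G, "A") == set("ABCDEFG")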
class TrieNode:
    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list) -> None:
        """Insert a list of words into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word into the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if ``word`` is stored in the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete ``word`` from the trie if it exists, pruning empty branches."""

        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def test() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
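# Illustrative extension (not part of the original): prefix lookup walks the
# trie exactly like find() but does not require is_leaf at the final node.
def starts_with(root: TrieNode, prefix: str) -> bool:
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return False
        curr = curr.nodes[char]
    return True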
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: Any , _lowerCamelCase: Any , _lowerCamelCase: int="attention" ) -> Tuple:
'''simple docstring'''
__lowerCamelCase : Optional[Any] = params[F"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
__lowerCamelCase : List[str] = params[F"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
__lowerCamelCase : Dict = params[F"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
__lowerCamelCase : int = params[F"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
def lowercase_ ( _lowerCamelCase: int , _lowerCamelCase: Any , _lowerCamelCase: int , _lowerCamelCase: Tuple=False ) -> Any:
'''simple docstring'''
if split_mlp_wi:
__lowerCamelCase : List[str] = params[F"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
__lowerCamelCase : Optional[Any] = params[F"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
__lowerCamelCase : Dict = (wi_a, wi_a)
else:
__lowerCamelCase : Any = params[F"""{prefix}/layers_{i}/mlp/wi/kernel"""]
__lowerCamelCase : int = params[F"""{prefix}/layers_{i}/mlp/wo/kernel"""]
return wi, wo
def lowercase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Dict , _lowerCamelCase: Any , _lowerCamelCase: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return params[F"""{prefix}/layers_{i}/{layer_name}/scale"""]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Convert the parameters from T5X-Flax to Transformers-PyTorch naming."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepare a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replace the model's parameters with the T5X converted ones."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Load a T5X checkpoint, convert it, and save the result as a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint.
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save the PyTorch model.
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
__A = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
__A = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
) | 135 |
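# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original file) of the key mechanism above:
# flax.traverse_util.flatten_dict turns the nested T5X parameter tree into
# tuple keys, which the converter joins with "/" before remapping.
#
#   from flax import traverse_util
#   tree = {"encoder": {"layers_0": {"attention": {"key": {"kernel": 0}}}}}
#   flat = {"/".join(k): v for k, v in traverse_util.flatten_dict(tree).items()}
#   assert flat == {"encoder/layers_0/attention/key/kernel": 0}
# ---------------------------------------------------------------------------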
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort a list of numbers by distributing them into value-range buckets.

    >>> bucket_sort([4, 5, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
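# Illustrative check (not part of the original): the bucket index is computed
# with int(i - min_value), so the sort also handles floats.
if __name__ == "__main__":
    assert bucket_sort([0.4, 1.2, 0.1, 3.5, 0.3]) == [0.1, 0.3, 0.4, 1.2, 3.5]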
"""simple docstring"""
from __future__ import annotations
import math
def __lowerCamelCase ( a_ : int ) -> Optional[int]:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowerCamelCase ( a_ : int ) -> Dict:
__SCREAMING_SNAKE_CASE :str = str(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :int = [n]
for i in range(1 , len(lowerCAmelCase__ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def __lowerCamelCase ( a_ : int ) -> List[str]:
if len(str(lowerCAmelCase__ ) ) > 3:
if not is_prime(int(str(lowerCAmelCase__ )[-3:] ) ) or not is_prime(int(str(lowerCAmelCase__ )[:3] ) ):
return False
return True
def __lowerCamelCase ( a_ : int = 11 ) -> Dict:
__SCREAMING_SNAKE_CASE :list[int] = []
__SCREAMING_SNAKE_CASE :Union[str, Any] = 13
while len(lowerCAmelCase__ ) != count:
if validate(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE :int = list_truncated_nums(lowerCAmelCase__ )
if all(is_prime(lowerCAmelCase__ ) for i in list_nums ):
list_truncated_primes.append(lowerCAmelCase__ )
num += 2
return list_truncated_primes
def __lowerCamelCase ( ) -> List[Any]:
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f'{sum(compute_truncated_primes(1_1)) = }')
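# Illustrative check (not part of the original): 3797 stays prime under every
# truncation (3797, 797, 97, 7 and 379, 37, 3), so it must appear in the list.
if __name__ == "__main__":
    assert 3797 in compute_truncated_primes(11)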
"""simple docstring"""
from itertools import product
def __lowerCamelCase ( a_ : int , a_ : int ) -> list[int]:
__SCREAMING_SNAKE_CASE :Tuple = sides_number
__SCREAMING_SNAKE_CASE :List[Any] = max_face_number * dice_number
__SCREAMING_SNAKE_CASE :List[Any] = [0] * (max_total + 1)
__SCREAMING_SNAKE_CASE :Optional[int] = 1
__SCREAMING_SNAKE_CASE :Tuple = range(a_ , max_face_number + 1 )
for dice_numbers in product(a_ , repeat=a_ ):
__SCREAMING_SNAKE_CASE :Any = sum(a_ )
totals_frequencies[total] += 1
return totals_frequencies
def __lowerCamelCase ( ) -> float:
__SCREAMING_SNAKE_CASE :Dict = total_frequency_distribution(
sides_number=4 , dice_number=9 )
__SCREAMING_SNAKE_CASE :Union[str, Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
__SCREAMING_SNAKE_CASE :Optional[Any] = 0
__SCREAMING_SNAKE_CASE :Any = 9
__SCREAMING_SNAKE_CASE :List[str] = 4 * 9
__SCREAMING_SNAKE_CASE :Dict = 6
for peter_total in range(a_ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
__SCREAMING_SNAKE_CASE :List[str] = (4**9) * (6**6)
__SCREAMING_SNAKE_CASE :Union[str, Any] = peter_wins_count / total_games_number
__SCREAMING_SNAKE_CASE :str = round(a_ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'{solution() = }') | 239 | 0 |
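# Illustrative check (not part of the original): the same frequency helper on
# two four-sided dice, whose 16 outcomes yield totals 2..8 with the classic
# triangular distribution.
if __name__ == "__main__":
    freqs = total_frequency_distribution(sides_number=4, dice_number=2)
    assert freqs[2:] == [1, 2, 3, 4, 3, 2, 1] and sum(freqs) == 16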
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_lowercase = logging.get_logger(__name__)
# General docstring
_lowercase = '''ResNetConfig'''
# Base docstring
_lowercase = '''microsoft/resnet-50'''
_lowercase = [1, 20_48, 7, 7]
# Image classification docstring
_lowercase = '''microsoft/resnet-50'''
_lowercase = '''tiger cat'''
_lowercase = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem): one aggressive 7x7 convolution plus max pooling."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """Projects residual features to the right size and downsamples if needed."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer composed of two 3x3 convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """A ResNet bottleneck layer; the first 1x1 convolution reduces channels by `reduction`."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """Handles weights initialization and the interface for loading pretrained models."""

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
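# ---------------------------------------------------------------------------
# Illustrative usage sketch (not from the original file), assuming the public
# transformers API and Hub access; the image path is hypothetical.
#
#   from transformers import AutoImageProcessor, ResNetForImageClassification
#   from PIL import Image
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])
# ---------------------------------------------------------------------------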
import collections
import gzip
import os
import urllib

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated


_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    """Read a big-endian uint32 from the byte stream."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        """Construct a _DataSet; `dtype` can be uint8 (raw pixels) or float32 (rescaled into [0, 1])."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it is already present locally."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Download (if needed) and load the MNIST train/validation/test splits."""
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
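# ---------------------------------------------------------------------------
# Illustrative usage sketch (not from the original file); the directory name
# is hypothetical and the API above is deprecated in favor of tf.data.
#
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = mnist.train.next_batch(batch_size=32)
#   # images: (32, 784) float32 scaled to [0.0, 1.0]; labels: (32, 10) one-hot
# ---------------------------------------------------------------------------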
import inspect
import tempfile
import unittest

from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


TOLERANCE = 1e-4

if is_torch_available():
    import torch

    from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
    from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder


@require_torch
class AutoformerModelTester:
    def __init__(self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10,
                 cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16,
                 num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5],
                 moving_average=25, autocorrelation_factor=5):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length, context_length=self.context_length,
            label_length=self.label_length, lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features, num_static_categorical_features=1,
            cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
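# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original file): the tester above wires a
# deliberately tiny config so the unittest methods below run quickly. Outside
# the unittest machinery it can be driven directly; `parent` is only used for
# assertions, so None suffices in this hypothetical usage.
#
#   tester = AutoformerModelTester(parent=None)
#   config, inputs_dict = tester.prepare_config_and_inputs()
#   model = AutoformerModel(config).eval()
#   with torch.no_grad():
#       print(model(**inputs_dict).last_hidden_state.shape)
# ---------------------------------------------------------------------------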
@require_torch
class a__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Tuple = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : Optional[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : Dict = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : List[str] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : List[Any] = False
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = AutoformerModelTester(self )
_lowercase : Union[str, Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim]
            )
            out_len = len(outputs)
            correct_outlen = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim]
            )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim]
            )
# Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 2, len(outputs))
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 371 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count the set bits of `number` by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count the set bits of `number` by testing the parity of each bit in turn."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
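# Quick sanity check on both implementations: 25 = 0b11001 has three set bits
# and 58 = 0b111010 has four, so
#     get_set_bits_count_using_brian_kernighans_algorithm(25)  # -> 3
#     get_set_bits_count_using_modulo_operator(58)             # -> 4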
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 199 | 0 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
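    # With the toy merges above ("l o", "lo w</w>", "e r</w>"), "lower" splits as
    # l+o -> "lo" and e+r</w> -> "er</w>", giving ["lo", "w", "er</w>"]; "newer"
    # only gets the e+r</w> merge, hence ["n", "e", "w", "er</w>"].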
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                tokens_s = tokenizer_s.tokenize(text)
                tokens_r = tokenizer_r.tokenize(text)
                self.assertListEqual(tokens_s, tokens_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                tokens_s = tokenizer_s.tokenize(text)
                tokens_r = tokenizer_r.tokenize(text)
                self.assertListEqual(tokens_s, tokens_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq)
                    tokens_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(tokens_s, tokens_r)
# Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy
                # transforms it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms
                # it into a space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq)
                    tokens_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(tokens_s, tokens_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                text = F' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(Exception) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))
@require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 12 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
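    # Note: in_channels=9 above is what makes this UNet an inpainting UNet: it is
    # fed the 4 noisy latent channels concatenated with 4 masked-image latent
    # channels plus a 1-channel downsampled mask.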
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 12 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
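    # Usage sketch (the checkpoint name is an assumption for illustration, not
    # taken from this file):
    #     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    #     inputs = processor(images=pil_image, text="a photo of", return_tensors="pt")
    #     sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']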
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) | 317 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__(self, vocab_size=267_735, cutoffs=[20_000, 40_000, 200_000], d_model=1_024, d_embed=1_024, n_head=16, d_head=64, d_inner=4_096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1_600, clamp_len=1_000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
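    # With the default cutoffs [20000, 40000, 200000] and
    # proj_share_all_but_first=True, tie_projs comes out as
    # [False, True, True, True]: the head cluster of the adaptive softmax keeps
    # its own projection while the three tail clusters tie theirs.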
    @property
    def max_position_embeddings(self):
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""") | 317 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
a__ = logging.get_logger(__name__)
a__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a__ = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
a__ = {
"""distilbert-base-uncased""": 5_12,
"""distilbert-base-uncased-distilled-squad""": 5_12,
"""distilbert-base-cased""": 5_12,
"""distilbert-base-cased-distilled-squad""": 5_12,
"""distilbert-base-german-cased""": 5_12,
"""distilbert-base-multilingual-cased""": 5_12,
}
a__ = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
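    # For example, with token_ids_0 = [5, 6] and token_ids_1 = [7] the segment
    # ids are [0, 0, 0, 0] for "[CLS] 5 6 [SEP]" followed by [1, 1] for "7 [SEP]".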
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 317 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float) -> np.ndarray:
    """Solve an ODE y' = ode_func(x, y), y(xa) = ya, with the explicit (forward) Euler method."""
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
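# Example: dy/dx = y with y(0) = 1 has exact solution exp(x), so
#     y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 5.0)
# gives y[-1] close to math.exp(5) ~ 148.41 (forward Euler slightly
# underestimates this growing solution).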
| 309 | 0 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`, removing empty lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is consistent with the state of the lib and maybe fix it."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.")
if __name__ == "__main__":
lowercase : Any = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowercase : Any = parser.parse_args()
check_model_table(args.fix_and_overwrite) | 355 |
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count the positive n-digit integers that are also nth powers of a base below `max_base`."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power)
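# This is Project Euler problem 63: for example, 16807 = 7**5 has exactly
# 5 digits, so it is one of the counted n-digit nth powers.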
if __name__ == "__main__":
print(f"{solution(10, 22) = }") | 151 | 0 |
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''')
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
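# For example, should_ignore("encoder.proj.bias", IGNORE_KEYS_S2T) is True
# because the literal key "encoder.proj" occurs in the name, while a key such
# as "text_encoder_prenet.*" matches any name starting with "text_encoder_prenet.".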
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''')
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(F'''{name} was ignored''')
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group",)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None,):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(F'''Unknown task name: {task}''')
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 299 |
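# Illustrative invocation of the converter above (not part of the original file;
# the script filename and local paths are hypothetical placeholders, while the
# flags match the argparse definitions):
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf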
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dump one batch in readable and tokenized form."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, TaForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total target sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total target sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total target sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--task", type=str, default="summarization", required=False, help="Task name.")
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
| 299 | 1 |
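# Hedged sketch of the label_smoothed_nll_loss helper imported from utils and
# used in _step above. It mirrors the fairseq-style implementation; treat it as
# an illustration, not the exact upstream code. Pass the pad token id as
# ignore_index, as _step does.
import torch


def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    """lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss.masked_fill_(pad_mask, 0.0)
    smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    # spread epsilon mass uniformly over the vocabulary
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss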
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
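# A minimal sketch of the lazy-import mechanism the registration above relies
# on (an assumption: this mirrors the spirit of transformers' _LazyModule, not
# its exact code). Heavy submodules are imported only on first attribute access.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr: str):
        # resolve the attribute by importing the owning submodule lazily
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)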
| 18 | '''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n    author = "Lin, Chin-Yew",\n    booktitle = "Text Summarization Branches Out",\n    month = jul,\n    year = "2004",\n    address = "Barcelona, Spain",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W04-1013",\n    pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around the Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n        `"rougeL"`: Longest common subsequence based scoring.\n        `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 18 | 1 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
| 26 |
def harmonic_series(n_term: str) -> list:
    """Generate the harmonic series up to the n-th term as a list of strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 26 | 1 |
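# Quick illustrative checks for harmonic_series above (not in the original file):
assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]
assert harmonic_series("") == []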
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k: empirically determined constant in [0.04, 0.06]
        window_size: neighbourhood size
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with detected corners marked, plus the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img) | 130 |
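# Hedged cross-check for the detector above: the per-window response is the
# standard Harris measure R = det(M) - k * trace(M)**2 with the structure
# tensor M = [[Sxx, Sxy], [Sxy, Syy]]. A vectorized sketch of the same response
# map (assumption: plain box window, no Gaussian smoothing):
import cv2
import numpy as np


def harris_response(img: np.ndarray, k: float = 0.04, window: int = 3) -> np.ndarray:
    dy, dx = np.gradient(img.astype(float))
    ixx, iyy, ixy = dx * dx, dy * dy, dx * dy
    kernel = np.ones((window, window))
    sxx = cv2.filter2D(ixx, -1, kernel)  # box-sum over the window
    syy = cv2.filter2D(iyy, -1, kernel)
    sxy = cv2.filter2D(ixy, -1, kernel)
    return sxx * syy - sxy * sxy - k * (sxx + syy) ** 2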
import base64


def base85_encode(string: str) -> bytes:
    # encode the input to bytes, then Ascii85-encode it
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    # decode from Ascii85, then decode the resulting bytes back to a string
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 130 | 1 |
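# Round-trip sanity check for the Ascii85 helpers above (illustrative only):
assert base85_decode(base85_encode("base 85")) == "base 85"
assert base85_encode("") == b""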
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_114_112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory, filename_prefix=None):
        return ()
| 62 |
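# What the codepoint tokenizer above amounts to in practice (illustrative only):
# every character is its own token, and ids are plain Unicode ordinals, so no
# vocabulary file is needed.
text = "héllo"
ids = [ord(ch) for ch in text]  # _convert_token_to_id
assert ids == [104, 233, 108, 108, 111]
assert "".join(chr(i) for i in ids) == text  # _convert_id_to_token round-trip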
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.

# The other one ends with 1 and has only one element 1.

# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89


def chain(number: int) -> bool:
    """Return True if the chain starting at ``number`` ends at 1, False if it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below ``number`` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(f"{solution() = }")
| 322 | 0 |
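# The two chains the comments above refer to, from the Project Euler 92 statement:
#   44 -> 32 -> 13 -> 10 -> 1 -> 1
#   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89
# Illustrative check against the rewritten helper:
assert next_number(44) == 32 and next_number(85) == 89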
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 363 |
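# What the stretch above implements is standard histogram equalization: each
# intensity r_k is remapped to s_k = (L - 1) * CDF(r_k). A compact numpy sketch
# of the same mapping (illustrative, not the class's exact rounding behavior):
import numpy as np


def equalize_sketch(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist = np.bincount(img.ravel(), minlength=levels)
    cdf = hist.cumsum() / img.size               # empirical CDF of intensities
    lut = np.round((levels - 1) * cdf).astype(img.dtype)
    return lut[img]                              # apply the lookup table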
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_text_dual_encoder'] = ['VisionTextDualEncoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_text_dual_encoder'] = ['FlaxVisionTextDualEncoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_text_dual_encoder'] = ['TFVisionTextDualEncoderModel']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 243 | 0 |
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product of a contiguous subarray of ``numbers``."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
| 68 |
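# Illustrative checks for max_product_subarray above:
assert max_product_subarray([2, 3, -2, 4]) == 6   # subarray [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0
assert max_product_subarray([-2, -3, 4]) == 24    # sign flip handled via min_till_now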
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 16 | 0 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # the file's actual column order is col_3, col_1, col_2
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
| 358 |
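# Hedged usage sketch of the writer these tests exercise; this assumes the
# public `datasets` API, where Dataset.to_json wraps JsonDatasetWriter.
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
ds.to_json("out.jsonl", lines=True)                     # JSON Lines: one object per line
ds.to_json("out.json", lines=False, orient="records")   # a single JSON array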
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
| 244 | 0 |
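# Cross-check for the circular convolution above: circular convolution is
# pointwise multiplication in the DFT domain (illustrative, independent of the
# class's matrix-based approach).
import numpy as np

a = np.array([2, 1, 2, -1])
b = np.array([1, 2, 3, 4])
via_fft = np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))
assert [round(v, 2) for v in via_fft] == [10.0, 10.0, 6.0, 14.0]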
import numpy as np


def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    # sigmoid-based approximation of GELU: x * sigmoid(1.702 * x)
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 228 |
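# Spot checks for the approximation above: sigmoid(0) == 0.5 and GELU(0) == 0;
# for large positive x the curve approaches x, for large negative x it approaches 0.
import numpy as np

assert sigmoid(np.array([0.0]))[0] == 0.5
assert gaussian_error_linear_unit(np.array([0.0]))[0] == 0.0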
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 228 | 1 |
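# Illustrative invocation of the converter above (checkpoint path and output
# directory are hypothetical placeholders; the flags match the argparse setup):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./longformer_qa.ckpt \
#       --pytorch_dump_folder_path ./longformer_qa_hf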
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert scale_features: map network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
@torch.no_grad()
def __call__( self : Optional[int] , a__ : List[List[int]] , a__ : Optional[torch.Generator] = None , a__ : int = 1_00 , a__ : bool = True , a__ : str = "numpy" , a__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a__ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a__ , a__ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(a__ )}.""" )
_A = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
_A = np.zeros([1, 0, self.n_dims] , np.floataa )
_A = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=a__ , device=self.device )
for i, encoder_input_tokens in enumerate(a__ ):
if i == 0:
_A = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
_A = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=a__ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
_A = ones
_A = self.scale_features(
a__ , output_range=[-1.0, 1.0] , clip=a__ )
_A = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=a__ , continuous_mask=a__ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
_A = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=a__ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(a__ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_A = self.decode(
encodings_and_masks=a__ , input_tokens=a__ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
_A = self.scheduler.step(a__ , a__ , a__ , generator=a__ ).prev_sample
_A = self.scale_to_features(a__ , input_range=[-1.0, 1.0] )
_A = mel[:1]
_A = mel.cpu().float().numpy()
_A = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a__ , a__ )
logger.info("Generated segment" , a__ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
_A = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
_A = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=a__ ) | 370 |
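The pipeline above turns per-segment note tokens into a mel spectrogram (and audio when the MelGAN vocoder is attached). A minimal usage sketch, assuming the public `google/music-spectrogram-diffusion` checkpoint and placeholder token ids; `output_type="mel"` avoids the ONNX vocoder requirement.

```python
import torch
from diffusers import SpectrogramDiffusionPipeline

pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

input_tokens = [[60, 62, 64, 0, 0]]  # hypothetical note token ids, one list per segment
output = pipe(input_tokens, num_inference_steps=50, output_type="mel")
print(output.audios.shape)  # concatenated mel features, one segment appended per loop
```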
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class snake_case ( unittest.TestCase):
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_A = torch.nn.Linear(10 , 10 )
_A = torch.optim.SGD(model.parameters() , 0.1 )
_A = Accelerator()
_A = accelerator.prepare(a__ )
try:
pickle.loads(pickle.dumps(a__ ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state() | 163 | 0 |
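The unit test above asserts that the optimizer wrapper returned by `prepare` stays picklable. The same check as a standalone snippet:

```python
import pickle

import torch
from accelerate import Accelerator

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

accelerator = Accelerator()
optimizer = accelerator.prepare(optimizer)

restored = pickle.loads(pickle.dumps(optimizer))  # should round-trip without error
print(type(restored).__name__)
```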
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class lowercase ( A__ , A__ ):
"""simple docstring"""
_a = 'resnet'
_a = ['basic', 'bottleneck']
def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=64 , UpperCamelCase_=[256, 512, 1024, 2048] , UpperCamelCase_=[3, 4, 6, 3] , UpperCamelCase_="bottleneck" , UpperCamelCase_="relu" , UpperCamelCase_=False , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
UpperCamelCase__ :Optional[Any] = num_channels
UpperCamelCase__ :int = embedding_size
UpperCamelCase__ :Dict = hidden_sizes
UpperCamelCase__ :int = depths
UpperCamelCase__ :int = layer_type
UpperCamelCase__ :List[Any] = hidden_act
UpperCamelCase__ :Optional[int] = downsample_in_first_stage
UpperCamelCase__ :Tuple = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(UpperCamelCase_ ) + 1 )]
UpperCamelCase__ , UpperCamelCase__ :Tuple = get_aligned_output_features_output_indices(
out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
class lowercase ( A__ ):
"""simple docstring"""
_a = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return 1e-3 | 97 |
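The config above exposes the backbone bookkeeping (`stage_names`, `out_features`, `out_indices`) that `get_aligned_output_features_output_indices` fills in. A quick sketch with default hyperparameters:

```python
from transformers import ResNetConfig

config = ResNetConfig(out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']
print(config.out_indices)   # indices aligned to out_features
```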
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =CustomTokenizer
pass | 76 | 0 |
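A subclass like the one above usually becomes discoverable through the auto classes via `register_for_auto_class`, so that `save_pretrained` records an `auto_map` entry. A hedged sketch; the class body, checkpoint, and output path are placeholders:

```python
from transformers import BertTokenizerFast

class CustomTokenizerFast(BertTokenizerFast):
    pass

CustomTokenizerFast.register_for_auto_class("AutoTokenizer")
tok = CustomTokenizerFast.from_pretrained("bert-base-uncased")
tok.save_pretrained("./custom-tok")  # tokenizer_config.json should now carry the auto_map
```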
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
# General docstring
UpperCAmelCase : Optional[int] = 'MobileNetV1Config'
# Base docstring
UpperCAmelCase : Optional[Any] = 'google/mobilenet_v1_1.0_224'
UpperCAmelCase : Any = [1, 1024, 7, 7]
# Image classification docstring
UpperCAmelCase : Dict = 'google/mobilenet_v1_1.0_224'
UpperCAmelCase : List[str] = 'tabby, tabby cat'
UpperCAmelCase : str = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ) -> Dict:
'''simple docstring'''
lowercase_ = {}
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase_ = model.mobilenet_va
else:
lowercase_ = model
lowercase_ = """MobilenetV1/Conv2d_0/"""
lowercase_ = backbone.conv_stem.convolution.weight
lowercase_ = backbone.conv_stem.normalization.bias
lowercase_ = backbone.conv_stem.normalization.weight
lowercase_ = backbone.conv_stem.normalization.running_mean
lowercase_ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
lowercase_ = i + 1
lowercase_ = i * 2
lowercase_ = backbone.layer[pt_index]
lowercase_ = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
lowercase_ = pointer.convolution.weight
lowercase_ = pointer.normalization.bias
lowercase_ = pointer.normalization.weight
lowercase_ = pointer.normalization.running_mean
lowercase_ = pointer.normalization.running_var
lowercase_ = backbone.layer[pt_index + 1]
lowercase_ = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
lowercase_ = pointer.convolution.weight
lowercase_ = pointer.normalization.bias
lowercase_ = pointer.normalization.weight
lowercase_ = pointer.normalization.running_mean
lowercase_ = pointer.normalization.running_var
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase_ = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
lowercase_ = model.classifier.weight
lowercase_ = model.classifier.bias
return tf_to_pt_map
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
lowercase_ = tf.train.list_variables(lowerCAmelCase__ )
lowercase_ = {}
for name, shape in init_vars:
logger.info(F'''Loading TF weight {name} with shape {shape}''' )
lowercase_ = tf.train.load_variable(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase_ = array
# Build TF to PyTorch weights loading map
lowercase_ = _build_tf_to_pytorch_map(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
for name, pointer in tf_to_pt_map.items():
logger.info(F'''Importing {name}''' )
if name not in tf_weights:
logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
continue
lowercase_ = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
lowercase_ = np.transpose(lowerCAmelCase__ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
lowercase_ = array.squeeze().transpose()
else:
lowercase_ = np.transpose(lowerCAmelCase__ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
lowercase_ = torch.from_numpy(lowerCAmelCase__ )
tf_weights.pop(lowerCAmelCase__ , lowerCAmelCase__ )
tf_weights.pop(name + """/RMSProp""" , lowerCAmelCase__ )
tf_weights.pop(name + """/RMSProp_1""" , lowerCAmelCase__ )
tf_weights.pop(name + """/ExponentialMovingAverage""" , lowerCAmelCase__ )
logger.info(F'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' )
return model
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = features.shape[-2:]
lowercase_ = conv_layer.stride
lowercase_ = conv_layer.kernel_size
if in_height % stride_height == 0:
lowercase_ = max(kernel_height - stride_height , 0 )
else:
lowercase_ = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
lowercase_ = max(kernel_width - stride_width , 0 )
else:
lowercase_ = max(kernel_width - (in_width % stride_width) , 0 )
lowercase_ = pad_along_width // 2
lowercase_ = pad_along_width - pad_left
lowercase_ = pad_along_height // 2
lowercase_ = pad_along_height - pad_top
lowercase_ = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(lowerCAmelCase__ , lowerCAmelCase__ , """constant""" , 0.0 )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] = 1 , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : Any = False , lowerCAmelCase_ : List[Any] = True , lowerCAmelCase_ : Union[str, Any] = True , ):
"""simple docstring"""
super().__init__()
lowercase_ = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''')
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''')
lowercase_ = 0 if config.tf_padding else int((kernel_size - 1) / 2)
lowercase_ = nn.Convad(
in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ , padding_mode="""zeros""" , )
if use_normalization:
lowercase_ = nn.BatchNormad(
num_features=SCREAMING_SNAKE_CASE_ , eps=config.layer_norm_eps , momentum=0.9_997 , affine=SCREAMING_SNAKE_CASE_ , track_running_stats=SCREAMING_SNAKE_CASE_ , )
else:
lowercase_ = None
if use_activation:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
lowercase_ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , SCREAMING_SNAKE_CASE_):
lowercase_ = ACTaFN[config.hidden_act]
else:
lowercase_ = config.hidden_act
else:
lowercase_ = None
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : str):
"""simple docstring"""
if self.config.tf_padding:
lowercase_ = apply_tf_padding(SCREAMING_SNAKE_CASE_ , self.convolution)
lowercase_ = self.convolution(SCREAMING_SNAKE_CASE_)
if self.normalization is not None:
lowercase_ = self.normalization(SCREAMING_SNAKE_CASE_)
if self.activation is not None:
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_)
return features
class SCREAMING_SNAKE_CASE__ ( _lowerCAmelCase ):
lowercase__ = MobileNetVaConfig
lowercase__ = load_tf_weights_in_mobilenet_va
lowercase__ = "mobilenet_v1"
lowercase__ = "pixel_values"
lowercase__ = False
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : int):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , (nn.Linear, nn.Convad)):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(SCREAMING_SNAKE_CASE_ , nn.BatchNormad):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
UpperCAmelCase : Union[str, Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UpperCAmelCase : int = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , _lowerCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( _lowerCAmelCase ):
def __init__( self : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple = True):
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE_)
lowercase_ = config
lowercase_ = 3_2
lowercase_ = max(int(depth * config.depth_multiplier) , config.min_depth)
lowercase_ = MobileNetVaConvLayer(
SCREAMING_SNAKE_CASE_ , in_channels=config.num_channels , out_channels=SCREAMING_SNAKE_CASE_ , kernel_size=3 , stride=2 , )
lowercase_ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
lowercase_ = nn.ModuleList()
for i in range(1_3):
lowercase_ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
lowercase_ = max(int(depth * config.depth_multiplier) , config.min_depth)
self.layer.append(
MobileNetVaConvLayer(
SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , kernel_size=3 , stride=strides[i] , groups=SCREAMING_SNAKE_CASE_ , ))
self.layer.append(
MobileNetVaConvLayer(
SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , kernel_size=1 , ))
lowercase_ = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : List[Any] = None , lowerCAmelCase_ : Tuple = None , lowerCAmelCase_ : Tuple = None , ):
"""simple docstring"""
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""")
lowercase_ = self.conv_stem(SCREAMING_SNAKE_CASE_)
lowercase_ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_)
if output_hidden_states:
lowercase_ = all_hidden_states + (hidden_states,)
lowercase_ = hidden_states
if self.pooler is not None:
lowercase_ = torch.flatten(self.pooler(SCREAMING_SNAKE_CASE_) , start_dim=1)
else:
lowercase_ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _lowerCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( _lowerCAmelCase ):
def __init__( self : Dict , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE_)
lowercase_ = config.num_labels
lowercase_ = MobileNetVaModel(SCREAMING_SNAKE_CASE_)
lowercase_ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
lowercase_ = nn.Dropout(config.classifier_dropout_prob , inplace=SCREAMING_SNAKE_CASE_)
lowercase_ = nn.Linear(SCREAMING_SNAKE_CASE_ , config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[Any] = None , lowerCAmelCase_ : Union[str, Any] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Union[str, Any] = None , ):
"""simple docstring"""
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.mobilenet_va(SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_)
lowercase_ = outputs.pooler_output if return_dict else outputs[1]
lowercase_ = self.classifier(self.dropout(SCREAMING_SNAKE_CASE_))
lowercase_ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase_ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase_ = """single_label_classification"""
else:
lowercase_ = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase_ = MSELoss()
if self.num_labels == 1:
lowercase_ = loss_fct(logits.squeeze() , labels.squeeze())
else:
lowercase_ = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
elif self.config.problem_type == "single_label_classification":
lowercase_ = CrossEntropyLoss()
lowercase_ = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
lowercase_ = BCEWithLogitsLoss()
lowercase_ = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
if not return_dict:
lowercase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states , )
| 366 |
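To see the classification head above end to end, the public checkpoint can be run on a dummy image tensor. A minimal sketch; the random tensor stands in for properly preprocessed pixel values:

```python
import torch
from transformers import MobileNetV1ForImageClassification

model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
pixel_values = torch.rand(1, 3, 224, 224)  # stand-in for a preprocessed image
with torch.no_grad():
    logits = model(pixel_values=pixel_values).logits
print(logits.shape)  # (1, 1001): ImageNet classes plus a background class
```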
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
lowercase_ = int(__lowerCAmelCase )
lowercase_ = int(__lowerCAmelCase )
lowercase_ = []
for temp in range(int(__lowerCAmelCase ) ):
series.append(F'''1 / {pow(temp + 1 , int(__lowerCAmelCase ) )}''' if series else """1""" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase : List[str] = int(input("Enter the last number (nth term) of the P-Series"))
UpperCAmelCase : Tuple = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 313 | 0 |
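A worked check of the generator above: for n = 5 and p = 2 the partial sums approach pi^2 / 6.

```python
from fractions import Fraction

terms = [Fraction(1, (k + 1) ** 2) for k in range(5)]  # 1, 1/4, 1/9, 1/16, 1/25
print(float(sum(terms)))  # 1.4636..., heading toward pi^2 / 6 (~1.6449)
```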
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : Union[str, Any] = BlenderbotSmallTokenizer
_A : List[Any] = False
def lowerCAmelCase_ ( self: Any ) -> Dict:
super().setUp()
snake_case_ :Dict = ["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""]
snake_case_ :Dict = dict(zip(snake_case , range(len(snake_case ) ) ) )
snake_case_ :Union[str, Any] = ["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""]
snake_case_ :Dict = {"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""}
snake_case_ :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(snake_case ) )
def lowerCAmelCase_ ( self: List[Any] , **snake_case: Optional[int] ) -> str:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def lowerCAmelCase_ ( self: Any , snake_case: Optional[Any] ) -> Any:
snake_case_ :Any = """adapt act apte"""
snake_case_ :int = """adapt act apte"""
return input_text, output_text
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
snake_case_ :Any = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ :Dict = """adapt act apte"""
snake_case_ :Optional[Any] = ["""adapt""", """act""", """ap@@""", """te"""]
snake_case_ :List[str] = tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
snake_case_ :int = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
snake_case_ :str = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case )
def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
snake_case_ :List[Any] = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
assert tok("""sam""" ).input_ids == [1_384]
snake_case_ :str = """I am a small frog."""
snake_case_ :Dict = tok([src_text] , padding=snake_case , truncation=snake_case )["""input_ids"""]
snake_case_ :Optional[Any] = tok.batch_decode(snake_case , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def lowerCAmelCase_ ( self: str ) -> str:
snake_case_ :Optional[int] = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
snake_case_ :str = """I am a small frog ."""
snake_case_ :Dict = """."""
snake_case_ :int = tok(snake_case )["""input_ids"""]
snake_case_ :List[Any] = tok(snake_case )["""input_ids"""]
assert encoded[-1] == encoded_dot[0]
| 66 |
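The round trip the test relies on is easy to see directly: the small Blenderbot tokenizer lowercases, splits into `@@`-marked BPE pieces, and decodes back with normalized spacing.

```python
from transformers import BlenderbotSmallTokenizer

tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
ids = tok("I am a small frog.").input_ids
print(tok.convert_ids_to_tokens(ids))  # lowercased pieces, continuations marked with @@
print(tok.decode(ids))                 # lossy: casing and spacing are normalized
```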
from itertools import permutations
def SCREAMING_SNAKE_CASE__ ( __a ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
snake_case_ : Any = [7, 11, 13, 17]
for i, test in enumerate(__a ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def SCREAMING_SNAKE_CASE__ ( __a = 10 ):
return sum(
int(''.join(map(__a , __a ) ) )
for num in permutations(range(__a ) )
if is_substring_divisible(__a ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 327 | 0 |
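A sanity check on the predicate above using 1406357289, the canonical pandigital number with the substring-divisibility property (Project Euler 43):

```python
num = tuple(int(d) for d in "1406357289")
ok = all(
    int("".join(map(str, num[i:i + 3]))) % p == 0
    for i, p in zip(range(1, 8), (2, 3, 5, 7, 11, 13, 17))
)
print(ok)  # True
```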
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
snake_case : List[str] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(lowercase ,lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
snake_case : Optional[int] = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
snake_case : List[Any] = s_dict.pop(lowercase )
elif "subsample" in key:
snake_case : int = s_dict.pop(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[Any]:
snake_case , snake_case : Dict = emb.weight.shape
snake_case : Dict = nn.Linear(lowercase ,lowercase ,bias=lowercase )
snake_case : Dict = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Tuple:
snake_case : str = torch.load(lowercase ,map_location="""cpu""" )
snake_case : Union[str, Any] = mam_aaa["""args"""]
snake_case : int = mam_aaa["""model"""]
snake_case : List[Any] = state_dict["""decoder.output_projection.weight"""]
remove_ignore_keys_(lowercase )
rename_keys(lowercase )
snake_case : List[Any] = state_dict["""decoder.embed_tokens.weight"""].shape[0]
snake_case : List[str] = args.share_decoder_input_output_embed
snake_case : int = [int(lowercase ) for i in args.conv_kernel_sizes.split(""",""" )]
snake_case : Optional[Any] = SpeechaTextConfig(
vocab_size=lowercase ,max_source_positions=args.max_source_positions ,max_target_positions=args.max_target_positions ,encoder_layers=args.encoder_layers ,decoder_layers=args.decoder_layers ,encoder_attention_heads=args.encoder_attention_heads ,decoder_attention_heads=args.decoder_attention_heads ,encoder_ffn_dim=args.encoder_ffn_embed_dim ,decoder_ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.encoder_embed_dim ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function="""relu""" ,num_conv_layers=len(lowercase ) ,conv_channels=args.conv_channels ,conv_kernel_sizes=lowercase ,input_feat_per_channel=args.input_feat_per_channel ,input_channels=args.input_channels ,tie_word_embeddings=lowercase ,num_beams=5 ,max_length=200 ,use_cache=lowercase ,decoder_start_token_id=2 ,early_stopping=lowercase ,)
snake_case : Union[str, Any] = SpeechaTextForConditionalGeneration(lowercase )
snake_case , snake_case : int = model.model.load_state_dict(lowercase ,strict=lowercase )
if len(lowercase ) > 0 and not set(lowercase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
f""" but all the following weights are missing {missing}""" )
if tie_embeds:
snake_case : Dict = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
snake_case : Any = lm_head_weights
model.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowerCamelCase : Dict = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 176 |
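Once converted, the dump folder loads as an ordinary Speech2Text model. A minimal sketch; the local path is hypothetical and the random tensor stands in for real log-mel features:

```python
import torch
from transformers import Speech2TextForConditionalGeneration, Speech2TextTokenizer

model = Speech2TextForConditionalGeneration.from_pretrained("./s2t-converted")  # hypothetical path
tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")

features = torch.rand(1, 584, 80)  # (batch, frames, 80 log-mel filter-bank bins)
generated = model.generate(features, max_length=50)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```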
def SCREAMING_SNAKE_CASE__ ( lowercase = 1000 ) -> int:
snake_case : Optional[int] = 3
snake_case : List[Any] = 0
while a < n:
# a multiple of 15 already satisfies a % 3 == 0, so a single branch suffices
if a % 3 == 0 or a % 5 == 0:
result += a
a += 1
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 176 | 1 |
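The loop above can be replaced by inclusion-exclusion over arithmetic series, which also confirms the expected result for n = 1000:

```python
def sum_multiples_below(k, n):
    m = (n - 1) // k  # count of positive multiples of k below n
    return k * m * (m + 1) // 2

n = 1000
print(sum_multiples_below(3, n) + sum_multiples_below(5, n) - sum_multiples_below(15, n))  # 233168
```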
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ :str = "▁"
lowercase__ :List[str] = {"vocab_file": "spiece.model"}
lowercase__ :List[Any] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
lowercase__ :Optional[int] = {
"google/pegasus-xsum": 512,
}
lowercase__ :str = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Dict =VOCAB_FILES_NAMES
lowercase_ : Optional[Any] =VOCAB_FILES_NAMES
lowercase_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
lowercase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : List[str] =['''input_ids''', '''attention_mask''']
def __init__( self ,A__ ,A__="<pad>" ,A__="</s>" ,A__="<unk>" ,A__="<mask_2>" ,A__="<mask_1>" ,A__=None ,A__=1_0_3 ,A__ = None ,**A__ ,):
lowercase = offset
if additional_special_tokens is not None:
if not isinstance(A__ ,A__):
raise TypeError(
f'additional_special_tokens should be of type {type(A__)}, but is'
f' {type(A__)}')
lowercase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(A__) ,self.offset - 1)
]
if len(set(A__)) != len(A__):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
lowercase = additional_special_tokens_extended
else:
lowercase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 ,self.offset)]
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A__ ,unk_token=A__ ,mask_token=A__ ,pad_token=A__ ,mask_token_sent=A__ ,offset=A__ ,additional_special_tokens=A__ ,sp_model_kwargs=self.sp_model_kwargs ,**A__ ,)
lowercase = mask_token_sent
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(A__)
# add special tokens to encoder dict
lowercase = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
})
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 ,self.offset - 1)})
lowercase = {v: k for k, v in self.encoder.items()}
@property
def A__ ( self):
return len(self.sp_model) + self.offset
def A__ ( self):
lowercase = {self.convert_ids_to_tokens(A__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self):
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self ,A__):
lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs'''):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def A__ ( self ,A__):
return self.sp_model.encode(A__ ,out_type=A__)
def A__ ( self ,A__):
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowercase = self.sp_model.piece_to_id(A__)
return sp_id + self.offset
def A__ ( self ,A__):
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowercase = self.sp_model.IdToPiece(index - self.offset)
return token
def A__ ( self ,A__):
lowercase = []
lowercase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A__) + token
lowercase = []
else:
current_sub_tokens.append(A__)
out_string += self.sp_model.decode(A__)
return out_string.strip()
def A__ ( self ,A__=False):
return 1
def A__ ( self ,A__):
lowercase = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def A__ ( self ,A__ ,A__ = None ,A__ = False):
if already_has_special_tokens:
return self._special_token_mask(A__)
elif token_ids_a is None:
return self._special_token_mask(A__) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a) + [1]
def A__ ( self ,A__ ,A__=None):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def A__ ( self ,A__ ,A__ = None):
if not os.path.isdir(A__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
lowercase = os.path.join(
A__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(A__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file ,A__)
elif not os.path.isfile(self.vocab_file):
with open(A__ ,'''wb''') as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(A__)
return (out_vocab_file,)
| 101 |
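The essential quirk above is the id offset: raw sentencepiece ids are shifted by `offset` (103 by default) to leave room for pad/eos/mask and the `<unk_x>` placeholders. A quick probe against the checkpoint from the vocab map above:

```python
from transformers import PegasusTokenizer

tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
print(tok.pad_token_id, tok.eos_token_id)  # 0, 1 (reserved ids below the offset)
ids = tok("PEGASUS shifts sentencepiece ids by an offset.").input_ids
print(ids[-1] == tok.eos_token_id)  # True: a single </s> is appended
```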
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowercase__ :str = 2
class lowercase :
def __init__( self ,*, # begin keyword-only arguments
A__="<s>" ,A__="<pad>" ,A__="</s>" ,A__="<unk>" ,A__=None ,):
lowercase , lowercase , lowercase , lowercase = bos, unk, pad, eos
lowercase = []
lowercase = []
lowercase = {}
lowercase = self.add_symbol(A__)
lowercase = self.add_symbol(A__)
lowercase = self.add_symbol(A__)
lowercase = self.add_symbol(A__)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(A__)
lowercase = len(self.symbols)
def __eq__( self ,A__):
return self.indices == other.indices
def __getitem__( self ,A__):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def __len__( self):
return len(self.symbols)
def __contains__( self ,A__):
return sym in self.indices
@classmethod
def A__ ( cls ,A__):
lowercase = cls()
d.add_from_file(A__)
return d
def A__ ( self ,A__ ,A__=1 ,A__=False):
if word in self.indices and not overwrite:
lowercase = self.indices[word]
lowercase = self.count[idx] + n
return idx
else:
lowercase = len(self.symbols)
lowercase = idx
self.symbols.append(A__)
self.count.append(A__)
return idx
def A__ ( self ,A__):
return 0
def A__ ( self ,A__):
if isinstance(A__ ,A__):
try:
with open(A__ ,'''r''' ,encoding='''utf-8''') as fd:
self.add_from_file(A__)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(A__))
return
lowercase = f.readlines()
lowercase = self._load_meta(A__)
for line in lines[indices_start_line:]:
try:
lowercase , lowercase = line.rstrip().rsplit(''' ''' ,1)
if field == "#fairseq:overwrite":
lowercase = True
lowercase , lowercase = line.rsplit(''' ''' ,1)
else:
lowercase = False
lowercase = int(A__)
lowercase = line
if word in self and not overwrite:
raise RuntimeError(
'''Duplicate word found when loading Dictionary: \'{}\'. '''
'''Duplicate words can overwrite earlier ones by adding the '''
'''#fairseq:overwrite flag at the end of the corresponding row '''
'''in the dictionary file. If using the Camembert model, please '''
'''download an updated copy of the model file.'''.format(A__))
self.add_symbol(A__ ,n=A__ ,overwrite=A__)
except ValueError:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''')
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
lowercase = dict((re.sub(R'''@@$''' , '''''' , lowerCAmelCase__ ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , lowerCAmelCase__ ), v) for k, v in d.items() )
lowercase = '''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[f'{k}</w>']
lowercase = d[k] # restore
return da
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
# prep
if not os.path.exists(lowerCAmelCase__ ):
raise ValueError(f'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
print(f'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
lowercase = os.path.join(lowerCAmelCase__ , '''checkpoint.pt''' )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(f'path to the file {checkpoint_file} does not exist!' )
lowercase = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
lowercase = chkpt['''cfg''']['''model''']
# dicts
lowercase = os.path.join(lowerCAmelCase__ , '''dict.txt''' )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(f'path to the file {dict_file} does not exist!' )
lowercase = Dictionary.load(lowerCAmelCase__ )
lowercase = rewrite_dict_keys(src_dict.indices )
lowercase = len(lowerCAmelCase__ )
lowercase = os.path.join(lowerCAmelCase__ , VOCAB_FILES_NAMES['''vocab_file'''] )
print(f'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# merges_file (bpecodes)
lowercase = os.path.join(lowerCAmelCase__ , '''bpecodes''' )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(f'path to the file {bpecodes_file} does not exist!' )
lowercase = os.path.join(lowerCAmelCase__ , VOCAB_FILES_NAMES['''merges_file'''] )
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
# model config
lowercase = os.path.join(lowerCAmelCase__ , '''config.json''' )
lowercase = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.02,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1E-12,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
# good hparam defaults to start with
print(f'Generating {biogpt_model_config_file}' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# tokenizer config
lowercase = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = {
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
'''model_max_length''': 1024,
'''pad_token''': '''<pad>''',
'''special_tokens_map_file''': None,
'''tokenizer_class''': '''BioGptTokenizer''',
'''unk_token''': '''<unk>''',
}
print(f'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# model
lowercase = chkpt['''model''']
# remove unneeded keys
lowercase = [
'''decoder.version''',
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('''output_projection.weight''' ):
lowercase = model_state_dict.pop(lowerCAmelCase__ )
else:
lowercase = model_state_dict.pop(lowerCAmelCase__ )
lowercase = BioGptConfig.from_pretrained(lowerCAmelCase__ )
lowercase = BioGptForCausalLM(lowerCAmelCase__ )
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase__ )
# save
lowercase = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
print(f'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCAmelCase__ , lowerCAmelCase__ )
print('''Conversion is done!''' )
if __name__ == "__main__":
lowercase__ :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--biogpt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase__ :Any = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 101 | 1 |
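The `rewrite_dict_keys` step above is self-contained enough to check on a toy vocabulary; the same regex logic, with the four special tokens restored under their bare names:

```python
import re

def rewrite_dict_keys(d):
    # drop the '@@' continuation marker; plain tokens get the '</w>' end-of-word mark
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
        for k, v in d.items()
    )
    for k in "<s> <pad> </s> <unk>".split():
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore the special token under its original name
    return d2

print(rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7}))
# {'le': 5, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
```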
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class __a :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=33 , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> List[Any]:
'''simple docstring'''
lowercase__: str = parent
lowercase__: List[str] = batch_size
lowercase__: Dict = seq_length
lowercase__: int = is_training
lowercase__: str = use_input_mask
lowercase__: Optional[int] = use_token_type_ids
lowercase__: Any = use_labels
lowercase__: List[str] = vocab_size
lowercase__: Union[str, Any] = hidden_size
lowercase__: Optional[int] = num_hidden_layers
lowercase__: Optional[int] = num_attention_heads
lowercase__: Dict = intermediate_size
lowercase__: Union[str, Any] = hidden_act
lowercase__: List[str] = hidden_dropout_prob
lowercase__: int = attention_probs_dropout_prob
lowercase__: str = max_position_embeddings
lowercase__: List[Any] = type_vocab_size
lowercase__: Optional[Any] = type_sequence_label_size
lowercase__: Tuple = initializer_range
lowercase__: str = num_labels
lowercase__: Optional[int] = num_choices
lowercase__: Dict = scope
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
lowercase__: str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: List[str] = None
if self.use_input_mask:
lowercase__: int = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__: Optional[Any] = None
lowercase__: List[str] = None
lowercase__: List[str] = None
if self.use_labels:
lowercase__: Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__: List[str] = ids_tensor([self.batch_size] , self.num_choices )
lowercase__: Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
lowercase__: Any = EsmModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__: str = model(snake_case__ , attention_mask=snake_case__ )
lowercase__: Union[str, Any] = model(snake_case__ )
lowercase__: Any = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
lowercase__: int = EsmForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__: Dict = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
lowercase__: int = self.num_labels
lowercase__: Dict = EsmForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase__: List[str] = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Optional[int] = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
lowercase__: Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
__lowercase : List[Any] = False
__lowercase : Optional[Any] = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__lowercase : str = ()
__lowercase : Dict = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase : List[str] = True
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Optional[int] = EsmModelTester(self )
lowercase__: Tuple = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__: int = type
self.model_tester.create_and_check_model(*snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
lowercase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__: Dict = EsmModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Any = self.model_tester.prepare_config_and_inputs()[0]
lowercase__: Dict = EsmEmbeddings(config=snake_case__ )
lowercase__: List[Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowercase__: List[str] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowercase__: Optional[Any] = create_position_ids_from_input_ids(snake_case__ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(snake_case__ , snake_case__ ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: Any = self.model_tester.prepare_config_and_inputs()[0]
lowercase__: Any = EsmEmbeddings(config=snake_case__ )
lowercase__: Optional[Any] = torch.empty(2 , 4 , 30 )
lowercase__: Dict = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowercase__: List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowercase__: Optional[int] = embeddings.create_position_ids_from_inputs_embeds(snake_case__ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(snake_case__ , snake_case__ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
pass
@require_torch
class __a ( SCREAMING_SNAKE_CASE_ ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
with torch.no_grad():
lowercase__: Optional[int] = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowercase__: str = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase__: Tuple = model(snake_case__ )[0]
lowercase__: int = 33
lowercase__: Dict = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , snake_case__ )
lowercase__: Optional[int] = torch.tensor(
[[[8.9_2_1_5, -1_0.5_8_9_8, -6.4_6_7_1], [-6.3_9_6_7, -1_3.9_1_1_4, -1.1_2_1_2], [-7.7_8_1_2, -1_3.9_5_1_6, -3.7_4_0_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
with torch.no_grad():
lowercase__: List[str] = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowercase__: Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase__: Dict = model(snake_case__ )[0]
# compare the actual values for a slice.
lowercase__: str = torch.tensor(
[[[0.1_4_4_4, 0.5_4_1_3, 0.3_2_4_8], [0.3_0_3_4, 0.0_0_5_3, 0.3_1_0_8], [0.3_2_2_8, -0.2_4_9_9, 0.3_4_1_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
| 353 |
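The position-id helper exercised in the last tests is compact enough to restate standalone: non-pad tokens count up from `padding_idx + 1` while pads keep `padding_idx`, matching the expected tensors above.

```python
import torch

def create_position_ids_from_input_ids(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()
    return (torch.cumsum(mask, dim=1) * mask).long() + padding_idx

ids = torch.tensor([[12, 31, 13, 1]])  # 1 plays the padding id here
print(create_position_ids_from_input_ids(ids, padding_idx=1))  # tensor([[2, 3, 4, 1]])
```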
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __a ( __UpperCamelCase ):
__lowercase : Any = 'pegasus'
__lowercase : Union[str, Any] = ['past_key_values']
__lowercase : Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , lowerCAmelCase__=50_265 , lowerCAmelCase__=1_024 , lowerCAmelCase__=12 , lowerCAmelCase__=4_096 , lowerCAmelCase__=16 , lowerCAmelCase__=12 , lowerCAmelCase__=4_096 , lowerCAmelCase__=16 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__="gelu" , lowerCAmelCase__=1_024 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=0 , lowerCAmelCase__=False , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=1 , **lowerCAmelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: int = vocab_size
lowercase__: Optional[int] = max_position_embeddings
lowercase__: List[str] = d_model
lowercase__: Optional[Any] = encoder_ffn_dim
lowercase__: Optional[Any] = encoder_layers
lowercase__: Union[str, Any] = encoder_attention_heads
lowercase__: Optional[int] = decoder_ffn_dim
lowercase__: Tuple = decoder_layers
lowercase__: Union[str, Any] = decoder_attention_heads
lowercase__: Dict = dropout
lowercase__: List[str] = attention_dropout
lowercase__: List[str] = activation_dropout
lowercase__: Optional[int] = activation_function
lowercase__: Dict = init_std
lowercase__: Optional[Any] = encoder_layerdrop
lowercase__: List[str] = decoder_layerdrop
lowercase__: Union[str, Any] = use_cache
lowercase__: Any = encoder_layers
lowercase__: List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , forced_eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
return self.d_model
| 288 | 0 |
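The `attribute_map` above is what lets generic code read `hidden_size` on an encoder-decoder config; a quick sketch:

```python
from transformers import PegasusConfig

cfg = PegasusConfig(d_model=512, encoder_attention_heads=8)
print(cfg.hidden_size)          # 512, aliased to d_model
print(cfg.num_attention_heads)  # 8, aliased to encoder_attention_heads
```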
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
# Construct model
if gpta_config_file == "":
__lowerCamelCase = GPTaConfig()
else:
__lowerCamelCase = GPTaConfig.from_json_file(UpperCamelCase__ )
__lowerCamelCase = GPTaModel(UpperCamelCase__ )
# Load weights from numpy
load_tf_weights_in_gpta(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
__lowerCamelCase = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
__lowerCamelCase = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , UpperCamelCase__ )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
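# Example invocation (illustrative; the script name and all paths are placeholders,
# not taken from this file):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /tmp/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /tmp/gpt2-pytorch \
#       --gpt2_config_file /tmp/gpt2/config.json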
| 67 |
'''simple docstring'''
def is_balanced(s: str) -> bool:
    """Return True if every bracket in `s` is closed in the correct order."""
    stack = []
    open_brackets = {"(", "[", "{"}
    closed_brackets = {")", "]", "}"}
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
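# Added illustrative checks (not part of the original snippet):
# is_balanced("{[()]}")  -> True: every closer matches the most recent opener
# is_balanced("([)]")    -> False: ')' arrives while '[' is on top of the stack
# is_balanced("(((")     -> False: unmatched openers leave the stack non-empty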
| 67 | 1 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
_lowerCAmelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowerCAmelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowerCAmelCase = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
_lowerCAmelCase = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowerCAmelCase = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
_lowerCAmelCase = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowerCAmelCase = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
_lowerCAmelCase = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowerCAmelCase = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
_lowerCAmelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowerCAmelCase = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
_lowerCAmelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
_lowerCAmelCase = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
_lowerCAmelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
_lowerCAmelCase = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
_lowerCAmelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
_lowerCAmelCase = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
_lowerCAmelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowerCAmelCase = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
_lowerCAmelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
_lowerCAmelCase = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
_lowerCAmelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowerCAmelCase = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
_lowerCAmelCase = ""
_lowerCAmelCase = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
_lowerCAmelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowerCAmelCase = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
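# Typical library usage mirrored by the tests above (illustrative sketch; the
# README path is a placeholder):
# readme = ReadMe.from_readme(Path("README.md"), example_yaml_structure)
# readme.validate()  # raises ValueError listing structural issues, as asserted above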
| 98 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, )
        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)
        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)
        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()
        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()
        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
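# Sketch of the memory mechanism the tests above exercise (illustrative, mirrors
# create_and_check_transfo_xl_model): a second forward pass reuses the cached
# hidden states ("mems") returned by the first, extending the effective context.
# hidden_1, mems_1 = model(segment_1_ids).to_tuple()
# hidden_2, mems_2 = model({"input_ids": segment_2_ids, "mems": mems_1}).to_tuple()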
| 98 | 1 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(self, speech_model: WhisperForConditionalGeneration, speech_processor: WhisperProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ):
        super().__init__()
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
def __call__( self , __A , __A=1_6000 , __A = 512 , __A = 512 , __A = 50 , __A = 7.5 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = "pil" , __A = True , __A = None , __A = 1 , **__A , ) -> Optional[Any]:
lowerCAmelCase_ :Optional[Any] = self.speech_processor.feature_extractor(
__A , return_tensors="""pt""" , sampling_rate=__A ).input_features.to(self.device )
lowerCAmelCase_ :List[Any] = self.speech_model.generate(__A , max_length=48_0000 )
lowerCAmelCase_ :Optional[int] = self.speech_processor.tokenizer.batch_decode(__A , skip_special_tokens=__A , normalize=__A )[
0
]
if isinstance(__A , __A ):
lowerCAmelCase_ :List[Any] = 1
elif isinstance(__A , __A ):
lowerCAmelCase_ :Dict = len(__A )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__A )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__A )}.""" )
# get prompt text embeddings
lowerCAmelCase_ :List[Any] = self.tokenizer(
__A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
lowerCAmelCase_ :Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCAmelCase_ :Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowerCAmelCase_ :int = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCAmelCase_ :int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Dict = text_embeddings.shape
lowerCAmelCase_ :Optional[Any] = text_embeddings.repeat(1 , __A , 1 )
lowerCAmelCase_ :str = text_embeddings.view(bs_embed * num_images_per_prompt , __A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCAmelCase_ :int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase_ :List[str]
if negative_prompt is None:
lowerCAmelCase_ :Union[str, Any] = [""""""] * batch_size
elif type(__A ) is not type(__A ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(__A )} !="""
f""" {type(__A )}.""" )
elif isinstance(__A , __A ):
lowerCAmelCase_ :Optional[int] = [negative_prompt]
elif batch_size != len(__A ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(__A )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
lowerCAmelCase_ :Dict = negative_prompt
lowerCAmelCase_ :List[str] = text_input_ids.shape[-1]
lowerCAmelCase_ :Dict = self.tokenizer(
__A , padding="""max_length""" , max_length=__A , truncation=__A , return_tensors="""pt""" , )
lowerCAmelCase_ :Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase_ :Dict = uncond_embeddings.shape[1]
lowerCAmelCase_ :Union[str, Any] = uncond_embeddings.repeat(1 , __A , 1 )
lowerCAmelCase_ :Optional[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , __A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase_ :List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCAmelCase_ :int = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCAmelCase_ :Optional[int] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCAmelCase_ :Optional[Any] = torch.randn(__A , generator=__A , device="""cpu""" , dtype=__A ).to(
self.device )
else:
lowerCAmelCase_ :List[str] = torch.randn(__A , generator=__A , device=self.device , dtype=__A )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowerCAmelCase_ :Tuple = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCAmelCase_ :Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase_ :List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase_ :List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase_ :List[str] = {}
if accepts_eta:
lowerCAmelCase_ :Dict = eta
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase_ :int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase_ :Dict = self.scheduler.scale_model_input(__A , __A )
# predict the noise residual
lowerCAmelCase_ :Optional[int] = self.unet(__A , __A , encoder_hidden_states=__A ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = noise_pred.chunk(2 )
lowerCAmelCase_ :Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase_ :int = self.scheduler.step(__A , __A , __A , **__A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__A , __A , __A )
lowerCAmelCase_ :str = 1 / 0.1_8_2_1_5 * latents
lowerCAmelCase_ :Union[str, Any] = self.vae.decode(__A ).sample
lowerCAmelCase_ :List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase_ :int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase_ :Dict = self.numpy_to_pil(__A )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__A , nsfw_content_detected=__A )
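# Illustrative loading sketch (added; the checkpoint ids and the community
# pipeline name are assumptions, not taken from this file):
# pipe = DiffusionPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5",
#     custom_pipeline="speech_to_image_diffusion",
#     speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#     speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
# )
# image = pipe(raw_audio, sampling_rate=16_000).images[0]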
| 84 |
def solution(num: int = 1000000) -> int:
    """Return the starting number below `num` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}  # memoized chain lengths
    for input1 in range(2, num):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
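# Added illustrative check: below 10 the longest chain starts at 9, whose Collatz
# sequence has 20 terms (9 -> 28 -> 14 -> 7 -> ... -> 1), so solution(10) == 9.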
if __name__ == "__main__":
print(solution(int(input().strip())))
| 205 | 0 |
'''simple docstring'''
def is_balanced(s: str) -> bool:
    """Return True if every bracket in `s` is closed in the correct order."""
    stack = []
    open_brackets = {"(", "[", "{"}
    closed_brackets = {")", "]", "}"}
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
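# Added note: matched bracket counts alone are not enough -- "([)]" contains one
# of each bracket yet is rejected, because the stack pops '[' when ')' arrives.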
| 350 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3], ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def lowercase__ ( self : Tuple ) -> str:
pass
def lowercase__ ( self : str ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : str ) -> Any:
return
def lowercase__ ( self : Tuple ) -> Tuple:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowercase__ ( self : Optional[int] ) -> str:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__snake_case )
@unittest.skip("""Swin does not use inputs_embeds""" )
def lowercase__ ( self : str ) -> List[str]:
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def lowercase__ ( self : Any ) -> Union[str, Any]:
pass
def lowercase__ ( self : Optional[Any] ) -> List[str]:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def lowercase__ ( self : str ) -> Any:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__snake_case )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def lowercase__ ( self : Union[str, Any] ) -> Any:
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def lowercase__ ( self : List[str] ) -> Any:
pass
def lowercase__ ( self : Tuple , __snake_case : Optional[int] , __snake_case : str , __snake_case : str , __snake_case : Optional[int] ) -> Optional[int]:
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
_lowerCAmelCase = outputs.hidden_states
_lowerCAmelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# Swin has a different seq_length
_lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase__ ( self : List[str] ) -> Any:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_lowerCAmelCase = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case )
def lowercase__ ( self : Any ) -> Any:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = 3
_lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_lowerCAmelCase = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def lowercase__ ( self : List[Any] ) -> Any:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowercase__ ( self : Any ) -> str:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowercase__ ( self : Dict ) -> Optional[int]:
pass
def lowercase__ ( self : List[str] ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t
def check_equivalence(__snake_case : Any , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : Dict={} ):
with torch.no_grad():
_lowerCAmelCase = model(**__snake_case , return_dict=__snake_case , **__snake_case )
_lowerCAmelCase = model(**__snake_case , return_dict=__snake_case , **__snake_case ).to_tuple()
def recursive_check(__snake_case : int , __snake_case : Any ):
if isinstance(__snake_case , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__snake_case , __snake_case ):
recursive_check(__snake_case , __snake_case )
elif isinstance(__snake_case , __snake_case ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(__snake_case , __snake_case )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__snake_case ) , set_nan_tensor_to_zero(__snake_case ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
f" {torch.isnan(__snake_case ).any()} and `inf`: {torch.isinf(__snake_case )}. Dict has"
f" `nan`: {torch.isnan(__snake_case ).any()} and `inf`: {torch.isinf(__snake_case )}."
) , )
recursive_check(__snake_case , __snake_case )
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
_lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case )
_lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case )
check_equivalence(__snake_case , __snake_case , __snake_case )
_lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
_lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
check_equivalence(__snake_case , __snake_case , __snake_case )
_lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case )
_lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case )
check_equivalence(__snake_case , __snake_case , __snake_case , {"""output_hidden_states""": True} )
_lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
_lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
check_equivalence(__snake_case , __snake_case , __snake_case , {"""output_hidden_states""": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
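# Contract the assertions above verify, in sketch form (added illustration):
# config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
# backbone = MaskFormerSwinBackbone(config)
# feats = backbone(pixel_values).feature_maps  # one (batch, C_i, H_i, W_i) tensor per requested stage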
| 220 | 0 |
"""simple docstring"""
def solution(limit: int = 50_000_000) -> int:
    """Count the numbers below `limit` expressible as p**2 + q**3 + r**4 with p, q, r prime."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)

    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
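# Added illustrative check: for limit=50 the expressible totals are
# 28 = 2**2 + 2**3 + 2**4, 33 = 3**2 + 2**3 + 2**4,
# 47 = 2**2 + 3**3 + 2**4, 49 = 5**2 + 2**3 + 2**4, so solution(50) == 4.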
if __name__ == "__main__":
    print(f"{solution() = }")
| 269 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # repeat each example num_choices times: (batch, seq) -> (batch, num_choices, seq)
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4)) | 269 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
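    # Note: for batched inputs DETR pads every image to the largest height/width
    # in the batch, hence the max() over per-image expected sizes above.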
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 26 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
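# A hollow square lamina with outer side o and hole side h (same parity,
# 1 <= h <= o - 2) uses o**2 - h**2 tiles; for each o the loop above counts
# the valid h with o**2 - h**2 <= limit (Project Euler problem 173).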
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise the PyTorch model from the JSON config
    config = FunnelConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save the PyTorch model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
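# Example invocation (the script file name and paths are illustrative, not from the source):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path model.ckpt --config_file config.json \
#       --pytorch_dump_path pytorch_model.bin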
| 324 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    # fast modular exponentiation by squaring
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
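# solution() computes the last `digits` digits of the hyperexponentiation
# (tetration) base tower of the given height by repeatedly exponentiating
# modulo 10**digits (Project Euler problem 188).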
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F'{solution() = }')
| 273 | 0 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = R'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    """A list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] applied in order to a `scores` tensor."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, cur_len, **kwargs):
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids, scores, cur_len):
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids, scores, cur_len):
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids, scores, cur_len):
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        # create a boolean flag to decide whether the min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids, scores, cur_len):
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids, scores, cur_len):
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If cur_len is beyond force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (non-negative) tokens are forced.
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>: timestamps are handled by this processor instead
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )
            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    # two consecutive timestamps: no more timestamps allowed next
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    # single timestamp: the next token must be a timestamp or EOS
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
        return scores
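# Minimal usage sketch (illustrative values; this mirrors how Flax generation
# utilities chain warpers at each decoding step):
#
#     processors = FlaxLogitsProcessorList(
#         [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
#     )
#     scores = processors(input_ids, scores, cur_len=input_ids.shape[-1])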
| 320 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 320 | 1 |
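# Pinned dependency table (package name -> version specifier). In transformers
# this table is the `deps` dict in src/transformers/dependency_versions_table.py,
# consumed by setup.py and the runtime dependency version check.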
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 29 | """simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
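    # Both cache checks above decode the same sequence with and without the
    # incremental key/value cache and require the final-step logits to agree
    # to within 1e-3.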
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 289 | 0 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
def __snake_case ( self : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ = self.config_class(**self.inputs_dict)
lowerCAmelCase__ = 'test'
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = os.path.join(lowercase__ , lowercase__)
config_first.save_pretrained(lowercase__)
lowerCAmelCase__ = self.config_class.from_pretrained(lowercase__ , subfolder=lowercase__)
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict())
def __snake_case ( self : List[str]):
'''simple docstring'''
lowerCAmelCase__ = self.config_class(**self.inputs_dict , num_labels=5)
self.parent.assertEqual(len(config.idalabel) , 5)
self.parent.assertEqual(len(config.labelaid) , 5)
lowerCAmelCase__ = 3
self.parent.assertEqual(len(config.idalabel) , 3)
self.parent.assertEqual(len(config.labelaid) , 3)
def __snake_case ( self : List[str]):
'''simple docstring'''
if self.config_class.is_composition:
return
lowerCAmelCase__ = self.config_class()
self.parent.assertIsNotNone(lowercase__)
def __snake_case ( self : Optional[Any]):
'''simple docstring'''
lowerCAmelCase__ = copy.deepcopy(lowercase__)
lowerCAmelCase__ = self.config_class(**lowercase__)
lowerCAmelCase__ = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa))
elif getattr(lowercase__ , lowercase__) != value:
wrong_values.append((key, getattr(lowercase__ , lowercase__), value))
if len(lowercase__) > 0:
lowerCAmelCase__ = '\n'.join([F"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values])
raise ValueError(F"""The following keys were not properly set in the config:\n{errors}""")
def __snake_case ( self : Any):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
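# Illustrative use of the tester above inside a model's test suite (the config
# class and the kwargs are assumptions for this sketch):
#
#   import unittest
#   from transformers import BertConfig
#
#   class BertConfigTest(unittest.TestCase):
#       def setUp(self):
#           self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#       def test_config(self):
#           self.config_tester.run_common_tests()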
| 352 |
import numpy as np
def sigmoid(vector):
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector):
    # Name assumed from the formula: this computes the SiLU activation, x * sigmoid(x).
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
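# Quick numeric check of the two activations above (outputs shown approximately):
#
#   >>> x = np.array([-1.0, 0.0, 1.0])
#   >>> sigmoid(x)
#   array([0.26894142, 0.5, 0.73105858])
#   >>> sigmoid_linear_unit(x)
#   array([-0.26894142, 0.0, 0.73105858])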
| 119 | 0 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
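# Illustrative usage of the helpers above (the feature spec and the output path
# are assumptions made up for this sketch, not part of the original module):
if __name__ == "__main__":
    example_features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("int32")})
    example_dataset = generate_example_dataset("/tmp/dummy_dataset.arrow", example_features, num_examples=10)
    print(example_dataset)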
| 69 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 204 | 0 |
def floyd(n):
    """
    Print the upper half of the pattern.
    """
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """
    Print the lower half of the pattern.
    """
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """
    Print the whole diamond pattern.
    """
    if n <= 0:
        print("       ...       ....      nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
| 371 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformer-cli.
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, "
                "xlnet, xlm, lxmert, rembert]"
            )
| 273 | 0 |
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
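# Minimal end-to-end sketch of the pipeline exercised above; the model id is
# the one used in the integration test (running this downloads the weights):
#
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pipeline = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
#   image = pipeline(num_inference_steps=50, output_type="pil").images[0]
#   image.save("pndm_sample.png")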
| 121 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
        window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02,
        layer_norm_eps=1e-5, out_features=None, out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
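# Example instantiation of the config above (the kwargs shown are the defaults,
# spelled out only for illustration):
#
#   config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2], out_features=["stage1", "stage4"])
#   print(config.hidden_size)  # 96 * 2 ** 3 == 768, the channel dim after the last stage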
| 121 | 1 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowercase__ ( snake_case__ ):
@slow
@require_torch
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : str =EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
lowerCamelCase_ : List[Any] =BertTokenizer.from_pretrained("bert-base-uncased" )
lowerCamelCase_ : Optional[int] =bertabert.config.encoder.vocab_size
lowerCamelCase_ : List[str] =tokenizer.sep_token_id
lowerCamelCase_ : Optional[int] =tokenizer.cls_token_id
lowerCamelCase_ : int =128
lowerCamelCase_ : Tuple =datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
lowerCamelCase_ : List[Any] =datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
lowerCamelCase_ : str =train_dataset.select(range(32 ) )
lowerCamelCase_ : Tuple =val_dataset.select(range(16 ) )
lowerCamelCase_ : Tuple =4
def _map_to_encoder_decoder_inputs(snake_case__ : Dict ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowerCamelCase_ : Optional[int] =tokenizer(batch["article"] , padding="max_length" , truncation=snake_case__ , max_length=512 )
lowerCamelCase_ : List[str] =tokenizer(batch["highlights"] , padding="max_length" , truncation=snake_case__ , max_length=128 )
lowerCamelCase_ : Any =inputs.input_ids
lowerCamelCase_ : Dict =inputs.attention_mask
lowerCamelCase_ : List[Any] =outputs.input_ids
lowerCamelCase_ : str =outputs.input_ids.copy()
lowerCamelCase_ : Optional[Any] =[
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
lowerCamelCase_ : Tuple =outputs.attention_mask
assert all(len(snake_case__ ) == 512 for x in inputs.input_ids )
assert all(len(snake_case__ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(snake_case__ : Optional[int] ):
lowerCamelCase_ : Dict =pred.label_ids
lowerCamelCase_ : List[Any] =pred.predictions
# all unnecessary tokens are removed
lowerCamelCase_ : Union[str, Any] =tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
lowerCamelCase_ : str =tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
lowerCamelCase_ : Dict =sum([int(pred_str[i] == label_str[i] ) for i in range(len(snake_case__ ) )] ) / len(snake_case__ )
return {"accuracy": accuracy}
# map train dataset
lowerCamelCase_ : List[Any] =train_dataset.map(
_map_to_encoder_decoder_inputs , batched=snake_case__ , batch_size=snake_case__ , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
lowerCamelCase_ : Optional[Any] =val_dataset.map(
_map_to_encoder_decoder_inputs , batched=snake_case__ , batch_size=snake_case__ , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
lowerCamelCase_ : Union[str, Any] =self.get_auto_remove_tmp_dir()
lowerCamelCase_ : List[Any] =SeqaSeqTrainingArguments(
output_dir=snake_case__ , per_device_train_batch_size=snake_case__ , per_device_eval_batch_size=snake_case__ , predict_with_generate=snake_case__ , evaluation_strategy="steps" , do_train=snake_case__ , do_eval=snake_case__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowerCamelCase_ : List[str] =SeqaSeqTrainer(
model=snake_case__ , args=snake_case__ , compute_metrics=_compute_metrics , train_dataset=snake_case__ , eval_dataset=snake_case__ , tokenizer=snake_case__ , )
# start training
trainer.train()
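# The -100 masking used in _map_to_encoder_decoder_inputs, shown in isolation
# (PyTorch's cross-entropy loss ignores label positions set to -100; the token
# ids below are made up for the example, with 0 as the pad id):
#
#   pad_id = 0
#   labels = [[101, 7592, 102, 0, 0]]
#   labels = [[-100 if token == pad_id else token for token in seq] for seq in labels]
#   # -> [[101, 7592, 102, -100, -100]]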
| 209 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def _snake_case ( lowerCamelCase__ : Any ) -> Union[str, Any]: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _snake_case ( ) -> List[Any]:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
lowerCamelCase_ : Optional[Any] =[1, 2, 3]
with pytest.raises(lowerCamelCase__ ):
with parallel_backend("unsupported backend" ):
map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=2 )
with pytest.raises(lowerCamelCase__ ):
with parallel_backend("unsupported backend" ):
map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def _snake_case ( lowerCamelCase__ : Tuple ) -> Optional[Any]:
lowerCamelCase_ : str =[1, 2]
lowerCamelCase_ : List[str] ={"a": 1, "b": 2}
lowerCamelCase_ : List[str] ={"a": [1, 2], "b": [3, 4]}
lowerCamelCase_ : Optional[int] ={"a": {"1": 1}, "b": 2}
lowerCamelCase_ : int ={"a": 1, "b": 2, "c": 3, "d": 4}
lowerCamelCase_ : Optional[int] =[2, 3]
lowerCamelCase_ : List[Any] ={"a": 2, "b": 3}
lowerCamelCase_ : int ={"a": [2, 3], "b": [4, 5]}
lowerCamelCase_ : str ={"a": {"1": 2}, "b": 3}
lowerCamelCase_ : Dict ={"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa
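# Sketch of the API exercised above, outside pytest (assumes the joblibspark
# backend is installed and a Spark session is running):
#
#   from datasets.parallel import parallel_backend
#   from datasets.utils.py_utils import map_nested
#
#   with parallel_backend("spark"):
#       print(map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2))
#   # -> {"a": [2, 3], "b": [4, 5]}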
| 209 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _snake_case ( a__ ):
snake_case__ = "sew-d"
def __init__( self : Union[str, Any] , UpperCAmelCase : List[str]=32 , UpperCAmelCase : Union[str, Any]=768 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : Any=12 , UpperCAmelCase : Union[str, Any]=3072 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : Optional[Any]=512 , UpperCAmelCase : str=256 , UpperCAmelCase : int=True , UpperCAmelCase : str=True , UpperCAmelCase : Dict=("p2c", "c2p") , UpperCAmelCase : List[str]="layer_norm" , UpperCAmelCase : List[str]="gelu_python" , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Tuple=0.0_2 , UpperCAmelCase : Union[str, Any]=1E-7 , UpperCAmelCase : Tuple=1E-5 , UpperCAmelCase : int="group" , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : Optional[Any]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCAmelCase : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCAmelCase : List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCAmelCase : Tuple=False , UpperCAmelCase : int=128 , UpperCAmelCase : Optional[int]=16 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : int=0.0_5 , UpperCAmelCase : Optional[Any]=10 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : Any=10 , UpperCAmelCase : Any=0 , UpperCAmelCase : List[Any]="mean" , UpperCAmelCase : Any=False , UpperCAmelCase : Any=False , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : int=2 , **UpperCAmelCase : Any , ):
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
__lowerCamelCase : str = hidden_size
__lowerCamelCase : Optional[Any] = feat_extract_norm
__lowerCamelCase : Optional[int] = feat_extract_activation
__lowerCamelCase : List[Any] = list(UpperCAmelCase )
__lowerCamelCase : Tuple = list(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = list(UpperCAmelCase )
__lowerCamelCase : List[str] = conv_bias
__lowerCamelCase : Optional[int] = num_conv_pos_embeddings
__lowerCamelCase : Optional[int] = num_conv_pos_embedding_groups
__lowerCamelCase : List[str] = len(self.conv_dim )
__lowerCamelCase : Union[str, Any] = num_hidden_layers
__lowerCamelCase : Optional[Any] = intermediate_size
__lowerCamelCase : Any = squeeze_factor
__lowerCamelCase : Optional[Any] = max_position_embeddings
__lowerCamelCase : List[str] = position_buckets
__lowerCamelCase : List[str] = share_att_key
__lowerCamelCase : Tuple = relative_attention
__lowerCamelCase : Tuple = norm_rel_ebd
__lowerCamelCase : str = list(UpperCAmelCase )
__lowerCamelCase : str = hidden_act
__lowerCamelCase : int = num_attention_heads
__lowerCamelCase : int = hidden_dropout
__lowerCamelCase : Optional[int] = attention_dropout
__lowerCamelCase : List[Any] = activation_dropout
__lowerCamelCase : Tuple = feat_proj_dropout
__lowerCamelCase : Optional[Any] = final_dropout
__lowerCamelCase : Dict = layer_norm_eps
__lowerCamelCase : List[str] = feature_layer_norm_eps
__lowerCamelCase : List[Any] = initializer_range
__lowerCamelCase : Any = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase : Any = apply_spec_augment
__lowerCamelCase : Optional[int] = mask_time_prob
__lowerCamelCase : Tuple = mask_time_length
__lowerCamelCase : List[str] = mask_time_min_masks
__lowerCamelCase : Optional[Any] = mask_feature_prob
__lowerCamelCase : Any = mask_feature_length
__lowerCamelCase : str = mask_feature_min_masks
# ctc loss
__lowerCamelCase : Optional[int] = ctc_loss_reduction
__lowerCamelCase : Tuple = ctc_zero_infinity
# sequence classification
__lowerCamelCase : List[Any] = use_weighted_layer_sum
__lowerCamelCase : Union[str, Any] = classifier_proj_size
@property
def lowerCamelCase__ ( self : List[Any] ):
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 135 | """simple docstring"""
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
| 135 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 357 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, "
                "xlnet, xlm, lxmert, rembert]"
            )
| 176 | 0 |
"""simple docstring"""
def upper(word: str) -> str:
    """
    Convert the entire string to uppercase ASCII letters by subtracting 32 from
    the integer representation of each lowercase character.

    >>> upper("wow")
    'WOW'
    >>> upper("Hello")
    'HELLO'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 77 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
        cls_token="[CLS]", mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
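# Usage sketch (not part of the original module; assumes network access to the
# "google/rembert" checkpoint referenced in the maps above):
# tok = RemBertTokenizerFast.from_pretrained("google/rembert")
# ids = tok.encode("Hello world")  # [CLS]/[SEP] are added via build_inputs_with_special_tokens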
| 324 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False, metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate: bool = False, cache_dir: Optional[str] = None):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank, training_args.device, training_args.n_gpu,
        bool(training_args.local_rank != -1), training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
_a : int = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_a : Optional[Any] = min(data_args.block_size , tokenizer.max_len )
# Get datasets
_a : Optional[Any] = (
get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_a : Optional[int] = (
get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , evaluate=UpperCamelCase__ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_a : Any = DataCollatorForPermutationLanguageModeling(
tokenizer=UpperCamelCase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
_a : Union[str, Any] = DataCollatorForWholeWordMask(
tokenizer=UpperCamelCase__ , mlm_probability=data_args.mlm_probability )
else:
_a : str = DataCollatorForLanguageModeling(
tokenizer=UpperCamelCase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_a : Union[str, Any] = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , data_collator=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , prediction_loss_only=UpperCamelCase__ , )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
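# Example invocation (a sketch: the script filename and data paths are placeholders,
# but every flag below is defined by the dataclasses / TrainingArguments above):
#   python run_language_modeling.py --model_name_or_path bert-base-cased --mlm \
#       --train_data_file train.txt --eval_data_file eval.txt \
#       --do_train --do_eval --output_dir ./lm_output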
| 324 | 1 |
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( snake_case , snake_case , snake_case , snake_case="attention" ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
__SCREAMING_SNAKE_CASE : Any = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
__SCREAMING_SNAKE_CASE : Optional[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
__SCREAMING_SNAKE_CASE : Any = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from a T5X-Flax checkpoint into the PyTorch naming scheme."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
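# Example invocation (a sketch; the paths are placeholders, the flags are the ones the parser above defines):
#   python convert_t5x_checkpoint_to_pytorch.py --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
#       --config_file t5_config.json --pytorch_dump_path ./t5_pytorch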
| 303 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 303 | 1 |
"""simple docstring"""
def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
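    # Worked example of the prefix-sum step above (an illustration, not from the source):
    # for collection [4, 2, 2, 8], coll_min = 2 and the raw counts over values 2..8
    # are [2, 0, 1, 0, 0, 0, 1]; after this loop they become [2, 2, 3, 3, 3, 3, 4],
    # so counting_arr[4 - coll_min] == 3 says three elements are <= 4.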
    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered
def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
__magic_name__ = input("Enter numbers separated by a comma:\n").strip()
__magic_name__ = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 255 |
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 255 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
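# Usage sketch (not part of the original module; assumes the standard "bert-base-uncased"
# hub checkpoint from the maps above):
# tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
# tok("hello world")["input_ids"]  # [CLS]/[SEP] ids come from build_inputs_with_special_tokens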
| 300 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
                 hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
                 layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False,
                 use_absolute_position_embeddings=False, use_relative_position_bias=False,
                 use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1,
                 use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True,
                 auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False,
                 semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
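# Usage sketch (class names as reconstructed above; the shown arguments are the defaults):
# config = Data2VecVisionConfig(image_size=224, patch_size=16)
# onnx_config = Data2VecVisionOnnxConfig(config)
# list(onnx_config.inputs.keys())  # -> ["pixel_values"], with dynamic batch/size axes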
| 300 | 1 |
import numpy as np
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : np.array ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
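# Sanity-check sketch (the function name above is a reconstruction, not from the source;
# the identity tanh(x) = 2 / (1 + e^(-2x)) - 1 means it should agree with numpy's tanh):
# assert np.allclose(tangent_hyperbolic(np.array([-1.0, 0.0, 1.0])), np.tanh([-1.0, 0.0, 1.0]))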
if __name__ == "__main__":
import doctest
doctest.testmod()
| 177 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
                 classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 177 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""MaskFormerFeatureExtractor"""]
a_ = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 330 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
# Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def lowerCamelCase__ ( ) -> str:
UpperCamelCase_ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=a__ , default=a__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=a__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(a__ , a__ )
if __name__ == "__main__":
main()
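# Example launch (a sketch; the script filename is an assumption, the flags come from the parser above):
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2 --mixed_precision fp16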
| 122 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64,
                 intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000,
                 attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048,
                 initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2,
                 tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , UpperCamelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f"""got {self.rope_scaling}""" )
__UpperCamelCase =self.rope_scaling.get('''type''' , UpperCamelCase__ )
__UpperCamelCase =self.rope_scaling.get('''factor''' , UpperCamelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 85 | """simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
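# Usage sketch (requires a CUDA toolchain; the first call JIT-compiles the kernel sources listed above):
# MSDA = load_cuda_kernels()  # returns the compiled MultiScaleDeformableAttention extension module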
| 85 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case ,'''do_resize''' ) )
self.assertTrue(hasattr(_snake_case ,'''size''' ) )
self.assertTrue(hasattr(_snake_case ,'''apply_ocr''' ) )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''height''': 18, '''width''': 18} )
lowercase__ : Any = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'''height''': 42, '''width''': 42} )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case ,Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
self.assertIsInstance(encoding.words ,_snake_case )
self.assertIsInstance(encoding.boxes ,_snake_case )
# Test batched
lowercase__ : List[str] = image_processing(_snake_case ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) ,)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
"""simple docstring"""
lowercase__ : str = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowercase__ : List[Any] = load_dataset('''hf-internal-testing/fixtures_docvqa''' ,split='''test''' )
lowercase__ : Optional[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
lowercase__ : List[str] = image_processing(_snake_case ,return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowercase__ : Any = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
lowercase__ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,_snake_case )
self.assertListEqual(encoding.boxes ,_snake_case )
# with apply_OCR = False
lowercase__ : int = LayoutLMvaImageProcessor(apply_ocr=_snake_case )
lowercase__ : Optional[Any] = image_processing(_snake_case ,return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 16 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(digit_len: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
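# Worked example (added for illustration): 49/98 is "digit cancelling" because
# deleting the shared digit 9 leaves 4/8, and 4/8 == 49/98.  The four two-digit
# fractions with this property are 16/64, 19/95, 26/65 and 49/98; their product
# reduces to 1/100, so solution() is expected to return 100.
assert is_digit_cancelling(49, 98)
assert not is_digit_cancelling(30, 50)  # trivial zero-cancelling cases do not count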
if __name__ == "__main__":
print(solution())
| 55 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
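# Note (added): _LazyModule defers the heavy torch/flax imports registered in
# _import_structure until an attribute is first accessed, so e.g. importing only
# SpeechEncoderDecoderConfig never pays the cost of loading torch or flax.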
| 164 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter (for testing purposes only)."""
    def __init__(self, module, rank):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)
    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
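# Note (added): LoRALayer above is a minimal LoRA-style adapter - a rank-`rank`
# bottleneck (Linear -> Linear) added residually to the frozen module's output,
# so during fine-tuning only the two small adapter matrices receive gradients.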
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = 'bigscience/bloom-1b7'
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = 'Hello my name is'
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I')
    EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n')
    EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University')
    MAX_NEW_TOKENS = 10
    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
"""simple docstring"""
    def setUp(self):
        super().setUp()
        # Models and tokenizer (attribute names kept consistent with the read sites below)
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map='auto')
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')
    def tearDown(self):
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : str = self.model_abit.config
self.assertTrue(hasattr(_lowerCamelCase , """quantization_config""" ) )
A_ : Union[str, Any] = config.to_dict()
A_ : Optional[int] = config.to_diff_dict()
A_ : Tuple = config.to_json_string()
def UpperCAmelCase_ ( self ) -> str:
from bitsandbytes.nn import Paramsabit
A_ : List[Any] = self.model_fpaa.get_memory_footprint()
A_ : Tuple = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A_ : Union[str, Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase_ ( self ) -> List[str]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_lowerCamelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : List[str] = self.tokenizer(self.input_text , return_tensors="""pt""" )
A_ : int = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_lowerCamelCase ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Dict = BitsAndBytesConfig()
A_ : Tuple = True
A_ : Any = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_lowerCamelCase , device_map="""auto""" )
A_ : Optional[int] = self.tokenizer(self.input_text , return_tensors="""pt""" )
A_ : Optional[Any] = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_lowerCamelCase ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase_ ( self ) -> List[Any]:
with self.assertRaises(_lowerCamelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Union[str, Any] = BitsAndBytesConfig()
with self.assertRaises(_lowerCamelCase ):
A_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_lowerCamelCase , load_in_abit=_lowerCamelCase , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def UpperCAmelCase_ ( self ) -> str:
with self.assertRaises(_lowerCamelCase ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(_lowerCamelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_lowerCamelCase ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(_lowerCamelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_lowerCamelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A_ : Optional[int] = self.tokenizer(self.input_text , return_tensors="""pt""" )
A_ : Tuple = self.model_fpaa.to(torch.floataa )
A_ : int = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A_ : Any = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
A_ : str = self.model_fpaa.half()
# Check this does not throw an error
A_ : Any = self.model_fpaa.float()
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=_lowerCamelCase , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCAmelCase_ ( cls ) -> Optional[int]:
A_ : Optional[int] = """t5-small"""
A_ : List[str] = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
A_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
A_ : Optional[Any] = """Translate in German: Hello, my dog is cute"""
def UpperCAmelCase_ ( self ) -> Optional[Any]:
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
from transformers import TaForConditionalGeneration
A_ : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules
A_ : Any = None
# test with `t5-small`
A_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
A_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A_ : Optional[int] = model.generate(**_lowerCamelCase )
# test with `flan-t5-small`
A_ : Tuple = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
A_ : Dict = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A_ : str = model.generate(**_lowerCamelCase )
A_ : Optional[int] = modules
def UpperCAmelCase_ ( self ) -> List[Any]:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A_ : str = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A_ : Any = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A_ : List[Any] = model.generate(**_lowerCamelCase )
# test with `flan-t5-small`
A_ : Union[str, Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
A_ : int = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
A_ : Optional[int] = model.generate(**_lowerCamelCase )
class Classes4BitModelTest(Base4bitTest):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> int:
super().setUp()
# model_name
A_ : Dict = """bigscience/bloom-560m"""
A_ : Union[str, Any] = """t5-small"""
# Different types of model
A_ : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
# Sequence classification model
A_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
# CausalLM model
A_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
# Seq2seq model
A_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_lowerCamelCase , device_map="""auto""" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> List[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest):
"""simple docstring"""
    def setUp(self):
        super().setUp()
    def tearDown(self):
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()
    def test_pipeline(self):
        self.pipe = pipeline(
            'text-generation', model=self.model_name, model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.float16}, max_new_tokens=self.MAX_NEW_TOKENS, )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]['generated_text'], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
"""simple docstring"""
    def setUp(self):
        super().setUp()
    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map='balanced')
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt')
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
"""simple docstring"""
    def setUp(self):
        self.model_name = 'facebook/opt-350m'
        super().setUp()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
A_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A_ : Optional[Any] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A_ : Any = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_lowerCamelCase ) ):
A_ : int = LoRALayer(module.q_proj , rank=16 )
A_ : Optional[int] = LoRALayer(module.k_proj , rank=16 )
A_ : Union[str, Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A_ : Dict = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A_ : Dict = model.forward(**_lowerCamelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_lowerCamelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = 'gpt2-xl'
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 164 | 1 |
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
1_0: 'a',
1_1: 'b',
1_2: 'c',
1_3: 'd',
1_4: 'e',
1_5: 'f',
}
def decimal_to_hexadecimal(decimal):
    '''Convert a whole-valued decimal number to its hexadecimal string.'''
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
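# Sanity checks (added for illustration; Python's built-in hex() agrees).
# divmod() peels off the least significant hex digit on each pass, e.g.
# 5973 -> (373, 5) -> (23, 5) -> (1, 7) -> (0, 1), which assembles to "0x1755".
assert decimal_to_hexadecimal(5973) == hex(5973) == "0x1755"
assert decimal_to_hexadecimal(-256) == hex(-256) == "-0x100"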
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False
    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True
    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)
    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 308 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longt5'] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_longt5'] = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 351 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print('Sorted order is:', ' '.join(str(x) for x in a))
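# Note (added): pigeonhole sort runs in O(n + k) time and O(k) extra space,
# where k = max(a) - min(a) + 1, so it only pays off when the range of values
# is small relative to the number of elements.
def _pigeonhole_demo():
    data = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(data)
    assert data == [2, 3, 4, 6, 7, 8, 8]  # sorted in place, duplicates kept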
if __name__ == "__main__":
main()
| 39 | 0 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method
    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)
    return wrapper
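# Usage sketch (added; `MyModel` and its forward are hypothetical names):
# decorating a model entry point lets accelerate's offload hook move weights
# onto the right device before the call runs.
#
#   class MyModel(torch.nn.Module):
#       @apply_forward_hook
#       def forward(self, x):
#           ...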
| 35 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    '''Runs check_program in a separate process with the given timeout.'''
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()
    if not result:
        result.append('timed out')
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    '''simple docstring'''
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append('passed')
        except TimeoutException:
            result.append('timed out')
        except BaseException as e:
            result.append(f"failed: {e}")
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException('Timed out!')
    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
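# Note (added): time_limit relies on SIGALRM via signal.setitimer, so it only
# works on Unix and only in the main thread; the finally block always clears
# the timer, even when TimeoutException is raised.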
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass
class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""
    def read(self, *args, **kwargs):
        raise OSError
    def readline(self, *args, **kwargs):
        raise OSError
    def readlines(self, *args, **kwargs):
        raise OSError
    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = 'stdin'
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    '''
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files, etc.).  WARNING: this function is NOT a security sandbox.
    '''
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))
    faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None
    import sys
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
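# Note (added): reliability_guard is defence-in-depth, not a security sandbox.
# The upstream OpenAI human-eval release, from which this file is adapted, warns
# that untrusted model-generated code should still run in its own container/VM.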
| 339 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'focalnet'
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act='gelu',
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 5 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """simple docstring"""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_0_0_0)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
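# Example (added for illustration): no gates are applied before the measurement,
# so the qubit stays in |0> and all shots land in the '0' bin, e.g. {'0': 1000}.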
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 5 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1_024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 66 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest( unittest.TestCase ):
def _lowercase ( self : List[Any] ) -> Dict:
_a : Optional[int] = tempfile.mkdtemp()
_a : Optional[Any] = SamImageProcessor()
_a : int = SamProcessor(UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowercase ( self : Tuple , **UpperCAmelCase__ : Any ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor
def _lowercase ( self : str ) -> int:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Tuple ) -> Dict:
_a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_a : Optional[int] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self : Dict ) -> Dict:
_a : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
_a : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
_a : Optional[Any] = self.get_image_processor()
_a : int = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Union[str, Any] = self.prepare_image_inputs()
_a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" )
_a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
_a : Optional[Any] = self.get_image_processor()
_a : Dict = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Tuple = [torch.ones((1, 3, 5, 5) )]
_a : Tuple = [[1764, 2646]]
_a : Optional[int] = [[683, 1024]]
_a : List[Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_a : int = processor.post_process_masks(
UpperCAmelCase__ , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
_a : Optional[Any] = [np.ones((1, 3, 5, 5) )]
_a : Tuple = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_a : List[str] = [[1, 0], [0, 1]]
with self.assertRaises(UpperCAmelCase__ ):
_a : str = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) )
@require_vision
@require_tf
class TFSamProcessorTest( unittest.TestCase ):
def _lowercase ( self : Any ) -> List[str]:
_a : List[str] = tempfile.mkdtemp()
_a : Any = SamImageProcessor()
_a : Union[str, Any] = SamProcessor(UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowercase ( self : List[str] , **UpperCAmelCase__ : Any ) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Dict ) -> List[str]:
_a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_a : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
_a : Optional[int] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : str = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
_a : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> str:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = SamProcessor(image_processor=UpperCAmelCase__ )
_a : int = self.prepare_image_inputs()
_a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" )
_a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def _lowercase ( self : Optional[Any] ) -> int:
_a : Optional[Any] = self.get_image_processor()
_a : Dict = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Any = [tf.ones((1, 3, 5, 5) )]
_a : Tuple = [[1764, 2646]]
_a : str = [[683, 1024]]
_a : Union[str, Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_a : Union[str, Any] = processor.post_process_masks(
UpperCAmelCase__ , tf.convert_to_tensor(UpperCAmelCase__ ) , tf.convert_to_tensor(UpperCAmelCase__ ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
_a : List[Any] = [np.ones((1, 3, 5, 5) )]
_a : Optional[int] = processor.post_process_masks(
UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_a : Dict = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
_a : List[Any] = processor.post_process_masks(
UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest( unittest.TestCase ):
def _lowercase ( self : str ) -> Optional[Any]:
_a : Optional[Any] = tempfile.mkdtemp()
_a : Dict = SamImageProcessor()
_a : List[str] = SamProcessor(UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowercase ( self : Any , **UpperCAmelCase__ : Dict ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor
def _lowercase ( self : Tuple ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : str ) -> int:
_a : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_a : int = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def _lowercase ( self : int ) -> List[Any]:
_a : Optional[Any] = self.get_image_processor()
_a : Optional[Any] = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Tuple = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
_a : str = [tf.convert_to_tensor(UpperCAmelCase__ )]
_a : Optional[int] = [torch.tensor(UpperCAmelCase__ )]
_a : Union[str, Any] = [[1764, 2646]]
_a : List[str] = [[683, 1024]]
_a : Optional[int] = processor.post_process_masks(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" )
_a : List[str] = processor.post_process_masks(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def _lowercase ( self : str ) -> Optional[Any]:
_a : List[Any] = self.get_image_processor()
_a : Any = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Dict = self.prepare_image_inputs()
_a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy()
_a : str = processor(images=UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy()
_a : Optional[Any] = image_processor(UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy()
_a : Optional[int] = processor(images=UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
| 294 | 0 |
'''simple docstring'''
def is_arithmetic_series(series):
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series):
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
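# Worked example (added): [2, 4, 6] has a constant difference of 2, so it is an
# arithmetic series, and its mean is (2 + 4 + 6) / 3 == 4.0.
assert is_arithmetic_series([2, 4, 6])
assert arithmetic_mean([2, 4, 6]) == 4.0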
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 96 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, groups=1, activation='relu'):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act)
        self.num_channels = config.num_channels
    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    def __init__(self, in_channels, reduced_channels):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1), nn.ReLU(), nn.Conv2d(reduced_channels, in_channels, kernel_size=1), nn.Sigmoid(), )
    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
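# Note (added): RegNetSELayer is a squeeze-and-excitation block - global average
# pooling "squeezes" each channel to one value, the two 1x1 convolutions
# "excite" those into per-channel weights in (0, 1), and the feature map is
# then rescaled channel-wise by them.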
class RegNetXLayer(nn.Module):
    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACT2FN[config.hidden_act]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACT2FN[config.hidden_act]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """
    A RegNet stage composed of stacked layers.
    """

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) | 96 | 1 |
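# --- Editor's sketch (addition, not part of the original file): a minimal
# forward pass through the classifier restored above. The config values are
# illustrative; no pretrained checkpoint is downloaded.
if __name__ == "__main__":
    import torch
    from transformers import RegNetConfig

    config = RegNetConfig(num_labels=10)
    model = RegNetForImageClassification(config)  # randomly initialized weights
    pixel_values = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(pixel_values).logits
    print(logits.shape)  # -> torch.Size([1, 10])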
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Run the same tests with spacy and ftfy installed."""

    pass
| 129 |
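# --- Editor's sketch (addition, standalone): how the toy merges in the test
# above turn "lower" into ["low", "er</w>"]. `toy_bpe` is a hypothetical
# helper that applies each merge greedily in listed order; real BPE applies
# merges by learned priority, but the listed order suffices for this tiny
# vocabulary.
def toy_bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for a, b in merges:
        i = 0
        while i < len(symbols) - 1:
            if (symbols[i], symbols[i + 1]) == (a, b):
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols


assert toy_bpe("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")]) == ["low", "er</w>"]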
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
| 245 | 0 |
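# --- Editor's sketch (addition, not part of the original module): running a
# per-example function in memory-bounded chunks with chunk_layer. `toy_layer`
# is a stand-in for something expensive such as an attention block; the first
# two dimensions are treated as batch dimensions.
if __name__ == "__main__":
    def toy_layer(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        return x * 2 + y

    x = torch.randn(4, 8, 16)
    y = torch.randn(4, 8, 16)
    out = chunk_layer(toy_layer, {"x": x, "y": y}, chunk_size=5, no_batch_dims=2)
    assert out.shape == (4, 8, 16)
    assert torch.allclose(out, toy_layer(x, y))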
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
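# --- Editor's sketch (addition): instantiating the config. The printed values
# follow directly from the defaults above.
if __name__ == "__main__":
    config = MegatronBertConfig(num_hidden_layers=12)  # any field can be overridden
    print(config.model_type, config.vocab_size, config.num_hidden_layers)
    # -> megatron-bert 29056 12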
| 358 | """simple docstring"""
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of initial set sizes; every set starts with rank 1.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets using the union-by-rank heuristic; return False if they
        are already in the same set.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """
        Find the root of a set, compressing the path along the way.
        """
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
| 321 | 0 |
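# --- Editor's sketch (addition): merging three singleton sets with the class
# above and tracking the largest set size.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1])
    ds.merge(0, 1)  # sets: {0, 1}, {2}
    ds.merge(1, 2)  # sets: {0, 1, 2}
    print(ds.max_set)  # -> 3
    print(ds.merge(0, 2))  # already in the same set -> False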
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """
    Given an Excel column title, return its corresponding column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("ZZ")
    702
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 234 |
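# --- Editor's note (addition): worked example for the function above. Each
# letter is a base-26 digit with A=1 ... Z=26, so "AB" = 1 * 26 + 2 = 28.
assert excel_title_to_column("A") == 1
assert excel_title_to_column("AB") == 28
assert excel_title_to_column("ZZ") == 26 * 26 + 26  # 702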
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """
    May raise json.decoder.JSONDecodeError
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """
    Instagram user crawler. May raise a requests.exceptions.ConnectionError.
    """

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """
        Return a dict of user information.
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def __lowerCAmelCase (__lowerCAmelCase = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCAmelCase : Dict = InstagramUser(__lowerCAmelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __lowerCAmelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser('github')
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 234 | 1 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
    from nltk import word_tokenize

_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 203 | """simple docstring"""
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
| 203 | 1 |
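# --- Editor's note (addition): why the loops above work. Write the arithmetic
# progression as x = a + d, y = a, z = a - d. Then
#     n = x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4 * d - a),
# so n = first_term * (4 * d - first_term). The inner loop enumerates the
# multiples n of first_term, and common_difference recovers
# d = (first_term + n / first_term) / 4, which must be an integer (hence the
# "% 4" check), with first_term > d (so z > 0) and first_term < 4 * d (so n > 0).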
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 165 |
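# --- Editor's sketch (addition): round-tripping text with the fast tokenizer
# above. Uses the public "roberta-base" checkpoint, downloaded on first use.
if __name__ == "__main__":
    tok = RobertaTokenizerFast.from_pretrained("roberta-base")
    enc = tok("Hello world", return_offsets_mapping=True)
    print(enc["input_ids"])  # begins with <s> (id 0) and ends with </s> (id 2)
    print(tok.decode(enc["input_ids"]))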
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
| 265 | 0 |
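# --- Editor's sketch (addition): a test consuming the fixtures above. Pytest
# injects each fixture by parameter name. The authentication keyword for
# `load_dataset` differs across `datasets` releases (`use_auth_token` in older
# ones, `token` in newer ones), so treat that argument as an assumption.
def test_load_private_text_repo(hf_private_dataset_repo_txt_data, hf_token):
    from datasets import load_dataset

    ds = load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
    assert len(ds["train"]) > 0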
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of func (a string in the variable x) starting from guess a."""
    x = a
    while True:
        x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))  # noqa: S307
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function (the value of pi)
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find the value of e (root of log(x) - 1 = 0)
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 136 |
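# --- Editor's sketch (addition): the same iteration without eval(), taking f
# and f' as callables. The update rule is x_{n+1} = x_n - f(x_n) / f'(x_n).
def newton_raphson_callable(f, f_prime, x0: float, precision: float = 1e-10) -> float:
    x = x0
    while abs(f(x)) >= precision:
        x = x - f(x) / f_prime(x)
    return x


assert abs(newton_raphson_callable(lambda x: x * x - 5, lambda x: 2 * x, 2.0) - 5**0.5) < 1e-8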
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """
        Calculate y[n] from the input sample x[n].
        """
        return 0.0


def get_bounds(
    fft_results: np.ndarray, samplerate: int
) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """
    Plot the FFT magnitude (in dB) of the filter's impulse response.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """
    Plot the unwrapped FFT phase of the filter's impulse response.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
| 136 | 1 |
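# --- Editor's sketch (addition, not from the original repository): a minimal
# filter satisfying the FilterType protocol, usable with the plotting helpers
# above. It is a 2-tap moving average, i.e. a first-order FIR low-pass.
class MovingAverageFilter:
    def __init__(self) -> None:
        self.prev_sample = 0.0

    def process(self, sample: float) -> float:
        out = 0.5 * (sample + self.prev_sample)
        self.prev_sample = sample
        return out


# show_frequency_response(MovingAverageFilter(), 48000)  # opens a plot window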
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
_A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_A : str = parser.parse_args()
_A : Optional[int] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
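# --- Editor's note (addition): example invocation. Both arguments are the
# positional parameters declared above; the script filename is assumed here
# and may differ in your checkout.
#
#   python convert_m2m100_original_checkpoint_to_pytorch.py \
#       /path/to/fairseq/model.pt /path/to/output_dir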
| 229 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a ( self : Optional[Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self : Any ) -> Dict:
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__lowerCAmelCase = DDIMScheduler(**SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str]=0 ) -> Dict:
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE__ )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create hint
__lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
__lowerCAmelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a ( self : List[Any] ) -> int:
__lowerCAmelCase = """cpu"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def a ( self : Any ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__lowerCAmelCase = init_image.resize((5_12, 5_12) )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
__lowerCAmelCase = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__ ) ).float() / 2_5_5.0
__lowerCAmelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
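        # (hint pixels are scaled from [0, 255] to [0.0, 1.0] above, then reshaped HWC -> NCHW)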
__lowerCAmelCase = """A robot, 4k photo"""
__lowerCAmelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
__lowerCAmelCase = pipeline.to(SCREAMING_SNAKE_CASE__ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , strength=0.8_5 , generator=SCREAMING_SNAKE_CASE__ , negative_prompt="""""" , ).to_tuple()
__lowerCAmelCase = pipeline(
image=SCREAMING_SNAKE_CASE__ , image_embeds=SCREAMING_SNAKE_CASE__ , negative_image_embeds=SCREAMING_SNAKE_CASE__ , hint=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="""np""" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 229 | 1 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase = False ) -> list[float]:
if radian_mode:
return [magnitude * cos(UpperCamelCase ), magnitude * sin(UpperCamelCase )]
return [magnitude * cos(radians(UpperCamelCase ) ), magnitude * sin(radians(UpperCamelCase ) )]
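# Illustrative values (degree mode, the default):
# polar_force(10, 0) -> [10.0, 0.0]
# polar_force(10, 90) -> [~6.1e-17, 10.0] (cos of 90 degrees is not exactly 0 in floats)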
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase = 10**-1 ) -> bool:
lowerCamelCase__ : NDArray[floataa] = cross(UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : float = sum(UpperCamelCase )
return abs(UpperCamelCase ) < eps
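# For 2-D vectors numpy's cross() returns the scalar z-component of each pairwise
# cross product (the moment about the origin, up to sign), so equilibrium reduces
# to the sum of those moments being within eps of zero.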
if __name__ == "__main__":
# Test to check if it works
_A : str =array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
_A : NDArray[floataa] =array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
_A : Any =array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
_A : int =array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
_A : Optional[int] =array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
_A : List[Any] =array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 129 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
_A : Union[str, Any] =logging.get_logger(__name__)
_A : Optional[Any] ={'''vocab_file''': '''spiece.model'''}
_A : Optional[Any] ={
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
_A : Union[str, Any] ={
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
_A : int ='''▁'''
class _lowercase ( _lowercase ):
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = ["""input_ids""", """attention_mask"""]
def __init__( self: int , UpperCamelCase__: int , UpperCamelCase__: List[str]="</s>" , UpperCamelCase__: Optional[Any]="<unk>" , UpperCamelCase__: Dict="<pad>" , UpperCamelCase__: List[Any]=100 , UpperCamelCase__: Dict=None , UpperCamelCase__: Optional[Dict[str, Any]] = None , UpperCamelCase__: Union[str, Any]=True , **UpperCamelCase__: Dict , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
lowerCamelCase__ : Union[str, Any] = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
lowerCamelCase__ : Optional[Any] = len(set(filter(lambda UpperCamelCase__ : bool("""extra_id""" in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
if legacy:
logger.warning_once(
F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
""" read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
lowerCamelCase__ : Optional[int] = legacy
lowerCamelCase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCamelCase__ , **UpperCamelCase__ , )
lowerCamelCase__ : Tuple = vocab_file
lowerCamelCase__ : Dict = extra_ids
lowerCamelCase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@staticmethod
def lowerCamelCase_ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
lowerCamelCase__ : Any = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCamelCase__ , )
return max_model_length
@property
def lowerCamelCase_ ( self: Any ):
return self.sp_model.get_piece_size() + self._extra_ids
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : str = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[int] , UpperCamelCase__: Optional[List[int]] = None , UpperCamelCase__: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
# normal case: some special tokens
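        # (the 1s in the masks below mark the appended eos </s> token(s); ordinary tokens get 0)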
if token_ids_a is None:
return ([0] * len(UpperCamelCase__ )) + [1]
return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1]
def lowerCamelCase_ ( self: Dict ):
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(R"""<extra_id_\d+>""" , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def lowerCamelCase_ ( self: str ):
return [self._convert_token_to_id(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[int] ):
if len(UpperCamelCase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCamelCase_ ( self: str , UpperCamelCase__: List[int] , UpperCamelCase__: Optional[List[int]] = None ):
lowerCamelCase__ : Optional[Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCamelCase_ ( self: int , UpperCamelCase__: List[int] , UpperCamelCase__: Optional[List[int]] = None ):
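        # builds "tokens_a </s>" for a single sequence, or "tokens_a </s> tokens_b </s>" for a pair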
lowerCamelCase__ : List[str] = self._add_eos_if_not_present(UpperCamelCase__ )
if token_ids_a is None:
return token_ids_a
else:
lowerCamelCase__ : int = self._add_eos_if_not_present(UpperCamelCase__ )
return token_ids_a + token_ids_a
def __getstate__( self: List[str] ):
lowerCamelCase__ : Optional[int] = self.__dict__.copy()
lowerCamelCase__ : Optional[Any] = None
return state
def __setstate__( self: List[Any] , UpperCamelCase__: Any ):
lowerCamelCase__ : Tuple = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase__ : str = {}
lowerCamelCase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: "TextInput" , **UpperCamelCase__: List[str] ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
lowerCamelCase__ : List[Any] = SPIECE_UNDERLINE + text.replace(UpperCamelCase__ , """ """ )
return super().tokenize(UpperCamelCase__ , **UpperCamelCase__ )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: str , **UpperCamelCase__: str ):
if not self.legacy:
lowerCamelCase__ : List[Any] = text.startswith(UpperCamelCase__ )
if is_first:
lowerCamelCase__ : Optional[int] = text[1:]
lowerCamelCase__ : int = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(UpperCamelCase__ ):
lowerCamelCase__ : str = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Optional[Any] ):
if token.startswith("""<extra_id_""" ):
lowerCamelCase__ : List[Any] = re.match(R"""<extra_id_(\d+)>""" , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: int ):
if index < self.sp_model.get_piece_size():
lowerCamelCase__ : str = self.sp_model.IdToPiece(UpperCamelCase__ )
else:
lowerCamelCase__ : Tuple = F'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def lowerCamelCase_ ( self: str , UpperCamelCase__: Tuple ):
lowerCamelCase__ : str = []
lowerCamelCase__ : Any = """"""
lowerCamelCase__ : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
lowerCamelCase__ : Dict = True
lowerCamelCase__ : str = []
else:
current_sub_tokens.append(UpperCamelCase__ )
lowerCamelCase__ : List[str] = False
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string.strip()
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: Optional[str] = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : List[Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , """wb""" ) as fi:
lowerCamelCase__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
| 129 | 1 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a = None , __a = None , __a = False , __a = False , __a = None , __a = None , **__a , ):
super().__init__(
features=__a , cache_dir=__a , keep_in_memory=__a , streaming=__a , num_proc=__a , **__a , )
__lowerCAmelCase = Generator(
cache_dir=__a , features=__a , generator=__a , gen_kwargs=__a , **__a , )
def snake_case ( self ):
# Build iterable dataset
if self.streaming:
__lowerCAmelCase = self.builder.as_streaming_dataset(split="train" )
# Build regular (map-style) dataset
else:
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
self.builder.download_and_prepare(
download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , num_proc=self.num_proc , )
__lowerCAmelCase = self.builder.as_dataset(
split="train" , verification_mode=__a , in_memory=self.keep_in_memory )
return dataset
| 57 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
a : Optional[int] = _symbol_database.Default()
a : Any = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
a : Tuple = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
a : str = None
a : Optional[Any] = B"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
a : str = 45
a : Any = 15_81
a : List[Any] = 15_17
a : Union[str, Any] = 15_70
a : Optional[Any] = 15_84
a : List[str] = 17_93
a : Optional[Any] = 17_95
a : Tuple = 19_16
a : Optional[Any] = 18_64
a : int = 19_05
a : Optional[Any] = 19_19
a : Union[str, Any] = 24_29
a : List[Any] = 22_08
a : Dict = 24_18
a : Optional[int] = 23_23
a : str = 24_07
# @@protoc_insertion_point(module_scope)
| 311 | 0 |
import math
def lowerCAmelCase_ ( __UpperCAmelCase: float , __UpperCAmelCase: float ) -> float:
if (
not isinstance(__UpperCAmelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * power_factor
def lowerCAmelCase_ ( __UpperCAmelCase: float , __UpperCAmelCase: float ) -> float:
if (
not isinstance(__UpperCAmelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
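# Illustrative numbers: with 100 VA apparent power and a 0.9 power factor,
# real power is 100 * 0.9 = 90.0 W and reactive power is
# 100 * sqrt(1 - 0.9**2) ~= 43.589 var.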
if __name__ == "__main__":
import doctest
doctest.testmod()
| 247 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
UpperCAmelCase_ = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 247 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
A : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : List[str]
__lowerCamelCase : Optional[List[str]]
@dataclass
class A :
'''simple docstring'''
__lowerCamelCase : List[int]
__lowerCamelCase : List[int]
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[List[int]] = None
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCamelCase : Dict = '''train'''
__lowerCamelCase : int = '''dev'''
__lowerCamelCase : Optional[Any] = '''test'''
class A :
'''simple docstring'''
@staticmethod
def a_ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def a_ ( __lowerCAmelCase : str ) -> List[str]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def a_ ( __lowerCAmelCase : List[InputExample] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : PreTrainedTokenizer , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : str="[CLS]" , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : Tuple="[SEP]" , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : List[Any]=0 , __lowerCAmelCase : Tuple=-1_00 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Any=True , ) -> List[InputFeatures]:
"""simple docstring"""
A__ = {label: i for i, label in enumerate(__lowerCAmelCase )}
A__ = []
for ex_index, example in enumerate(__lowerCAmelCase ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d of %d""" , __lowerCAmelCase , len(__lowerCAmelCase ) )
A__ = []
A__ = []
for word, label in zip(example.words , example.labels ):
A__ = tokenizer.tokenize(__lowerCAmelCase )
                # bert-base-multilingual-cased sometimes outputs nothing ([]) when tokenizing just a space.
if len(__lowerCAmelCase ) > 0:
tokens.extend(__lowerCAmelCase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__lowerCAmelCase ) - 1) )
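                    # e.g. WordPiece might split "Washington" into ["Wash", "##ing", "##ton"],
                    # keeping [label_id, pad, pad] so the loss counts each word exactly once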
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
A__ = tokenizer.num_special_tokens_to_add()
if len(__lowerCAmelCase ) > max_seq_length - special_tokens_count:
A__ = tokens[: (max_seq_length - special_tokens_count)]
A__ = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
            # RoBERTa uses an extra separator between pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
A__ = [sequence_a_segment_id] * len(__lowerCAmelCase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
A__ = [cls_token] + tokens
A__ = [pad_token_label_id] + label_ids
A__ = [cls_token_segment_id] + segment_ids
A__ = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
A__ = [1 if mask_padding_with_zero else 0] * len(__lowerCAmelCase )
# Zero-pad up to the sequence length.
A__ = max_seq_length - len(__lowerCAmelCase )
if pad_on_left:
A__ = ([pad_token] * padding_length) + input_ids
A__ = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
A__ = ([pad_token_segment_id] * padding_length) + segment_ids
A__ = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(__lowerCAmelCase ) == max_seq_length
assert len(__lowerCAmelCase ) == max_seq_length
assert len(__lowerCAmelCase ) == max_seq_length
assert len(__lowerCAmelCase ) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" , example.guid )
logger.info("""tokens: %s""" , """ """.join([str(__lowerCAmelCase ) for x in tokens] ) )
logger.info("""input_ids: %s""" , """ """.join([str(__lowerCAmelCase ) for x in input_ids] ) )
logger.info("""input_mask: %s""" , """ """.join([str(__lowerCAmelCase ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" , """ """.join([str(__lowerCAmelCase ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" , """ """.join([str(__lowerCAmelCase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
A__ = None
features.append(
InputFeatures(
input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , label_ids=__lowerCAmelCase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
__lowerCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : Tuple , __lowerCAmelCase : TokenClassificationTask , __lowerCAmelCase : str , __lowerCAmelCase : PreTrainedTokenizer , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Split = Split.train , ) -> int:
"""simple docstring"""
A__ = os.path.join(
__lowerCAmelCase , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(__lowerCAmelCase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A__ = cached_features_file + """.lock"""
with FileLock(__lowerCAmelCase ):
if os.path.exists(__lowerCAmelCase ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
A__ = torch.load(__lowerCAmelCase )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
A__ = token_classification_task.read_examples_from_file(__lowerCAmelCase , __lowerCAmelCase )
# TODO clean up all this to leverage built-in features of tokenizers
A__ = token_classification_task.convert_examples_to_features(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__lowerCAmelCase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'Saving features into cached file {cached_features_file}' )
torch.save(self.features , __lowerCAmelCase )
def __len__( self : List[str] ) -> Any:
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[str] , __lowerCAmelCase : Tuple ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
class A :
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
__lowerCamelCase : int = -100
def __init__( self : Optional[int] , __lowerCAmelCase : TokenClassificationTask , __lowerCAmelCase : str , __lowerCAmelCase : PreTrainedTokenizer , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Split = Split.train , ) -> int:
"""simple docstring"""
A__ = token_classification_task.read_examples_from_file(__lowerCAmelCase , __lowerCAmelCase )
# TODO clean up all this to leverage built-in features of tokenizers
A__ = token_classification_task.convert_examples_to_features(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__lowerCAmelCase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
A__ = tf.data.Dataset.from_generator(
__lowerCAmelCase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
A__ = tf.data.Dataset.from_generator(
__lowerCAmelCase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def a_ ( self : Dict ) -> Any:
"""simple docstring"""
A__ = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Tuple ) -> List[str]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : int , __lowerCAmelCase : Union[str, Any] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
| 274 |
def __lowerCamelCase ( __a :int = 1_0_0_0_0_0_0 ) -> int:
"""simple docstring"""
A__ = limit + 1
A__ = [0] * limit
for first_term in range(1 , __a ):
for n in range(__a , __a , __a ):
A__ = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1  # z > 0 needs a > d, and n > 0 needs a < 4 * d
A__ = sum(1 for x in frequency[1:limit] if x == 1_0 )
return count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 274 | 1 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __A (snake_case__):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : pyspark.sql.DataFrame , UpperCAmelCase_ : Optional[NamedSplit] = None , UpperCAmelCase_ : Optional[Features] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : str = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : str = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : str = "arrow" , **UpperCAmelCase_ : Optional[Any] , ) ->Optional[Any]:
"""simple docstring"""
super().__init__(
split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , )
snake_case_ = load_from_cache_file
snake_case_ = file_format
snake_case_ = Spark(
df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , )
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
snake_case_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCAmelCase_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 366 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__SCREAMING_SNAKE_CASE : Dict = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
__SCREAMING_SNAKE_CASE : Optional[int] = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
__SCREAMING_SNAKE_CASE : Tuple = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __A (datasets.Metric):
'''simple docstring'''
def lowerCAmelCase ( self : Tuple ) ->Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : int="auto" , UpperCAmelCase_ : Dict=-1 , UpperCAmelCase_ : Optional[Any]=0.9 , UpperCAmelCase_ : Dict=5 , UpperCAmelCase_ : Optional[int]=500 , UpperCAmelCase_ : Any="gpt2-large" , UpperCAmelCase_ : Union[str, Any]=-1 , UpperCAmelCase_ : Optional[Any]=1_024 , UpperCAmelCase_ : Dict=25 , UpperCAmelCase_ : Optional[Any]=5 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Union[str, Any]=25 , ) ->List[Any]:
"""simple docstring"""
snake_case_ = compute_mauve(
p_text=UpperCAmelCase_ , q_text=UpperCAmelCase_ , p_features=UpperCAmelCase_ , q_features=UpperCAmelCase_ , p_tokens=UpperCAmelCase_ , q_tokens=UpperCAmelCase_ , num_buckets=UpperCAmelCase_ , pca_max_data=UpperCAmelCase_ , kmeans_explained_var=UpperCAmelCase_ , kmeans_num_redo=UpperCAmelCase_ , kmeans_max_iter=UpperCAmelCase_ , featurize_model_name=UpperCAmelCase_ , device_id=UpperCAmelCase_ , max_text_length=UpperCAmelCase_ , divergence_curve_discretization_size=UpperCAmelCase_ , mauve_scaling_factor=UpperCAmelCase_ , verbose=UpperCAmelCase_ , seed=UpperCAmelCase_ , )
return out
| 233 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : int = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
lowerCamelCase_ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 81 |
"""simple docstring"""
def _A ( lowercase , lowercase ):
"""simple docstring"""
return number | (1 << position)
def _A ( lowercase , lowercase ):
"""simple docstring"""
return number & ~(1 << position)
def _A ( lowercase , lowercase ):
"""simple docstring"""
return number ^ (1 << position)
def _A ( lowercase , lowercase ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def _A ( lowercase , lowercase ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
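# Quick sanity checks (illustrative, written as raw expressions):
# 0b1100 | (1 << 1) == 0b1110 (set), 0b1100 & ~(1 << 2) == 0b1000 (clear),
# 0b1100 ^ (1 << 3) == 0b0100 (flip), (0b1100 >> 2) & 1 == 1 (bit is set)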
if __name__ == "__main__":
import doctest
doctest.testmod() | 81 | 1 |
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise ValueError("check_bouncy() accepts only integer arguments" )
__SCREAMING_SNAKE_CASE = str(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = "".join(sorted(lowerCAmelCase_ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
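# e.g. 538 is bouncy (its digits go down then up), while 1234 (non-decreasing)
# and 9530 (non-increasing) are not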
def UpperCAmelCase__ (lowerCAmelCase_ = 99 ):
'''simple docstring'''
if not 0 < percent < 100:
raise ValueError("solution() only accepts values from 0 to 100" )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1
while True:
if check_bouncy(lowerCAmelCase_ ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(9_9)}")
| 195 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
a__ : Optional[int] = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : Optional[Any] = ["pixel_values"]
def __init__( self : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : Dict , ) -> None:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 2_2_4}
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"height": 2_5_6, "width": 2_5_6}
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , param_name="crop_size" )
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = resample
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = do_flip_channel_order
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PIL.Image.BILINEAR , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[Any] , ) -> np.ndarray:
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
__SCREAMING_SNAKE_CASE = get_resize_output_image_size(UpperCAmelCase__ , size=size["shortest_edge"] , default_to_square=UpperCAmelCase__ )
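        # e.g. size={"shortest_edge": 224} rescales so the shorter image side becomes
        # 224 while (approximately) preserving the aspect ratio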
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Any , ) -> np.ndarray:
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(UpperCAmelCase__ , size=(size["height"], size["width"]) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Dict , ) -> Dict:
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
return flip_channel_order(UpperCAmelCase__ , data_format=UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : str , ) -> PIL.Image.Image:
__SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop
__SCREAMING_SNAKE_CASE = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
__SCREAMING_SNAKE_CASE = size if size is not None else self.size
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , param_name="crop_size" )
__SCREAMING_SNAKE_CASE = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
__SCREAMING_SNAKE_CASE = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
__SCREAMING_SNAKE_CASE = [self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
__SCREAMING_SNAKE_CASE = [self.flip_channel_order(image=UpperCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Tuple] = None ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = target_sizes.numpy()
__SCREAMING_SNAKE_CASE = []
for idx in range(len(UpperCAmelCase__ ) ):
__SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = logits.argmax(dim=1 )
__SCREAMING_SNAKE_CASE = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 195 | 1 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__ :
def __init__( self : Any ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : List[str]=13 ,lowerCamelCase__ : Union[str, Any]=30 ,lowerCamelCase__ : Dict=2 ,lowerCamelCase__ : int=3 ,lowerCamelCase__ : Tuple=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Optional[Any]=32 ,lowerCamelCase__ : List[str]=5 ,lowerCamelCase__ : Optional[Any]=4 ,lowerCamelCase__ : Optional[int]=37 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : Tuple=10 ,lowerCamelCase__ : str=0.0_2 ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : int=0.6 ,lowerCamelCase__ : Tuple=None ,):
'''simple docstring'''
_UpperCamelCase : Dict = parent
_UpperCamelCase : Tuple = batch_size
_UpperCamelCase : Optional[int] = image_size
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : Optional[Any] = num_channels
_UpperCamelCase : int = is_training
_UpperCamelCase : int = use_labels
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : int = num_attention_heads
_UpperCamelCase : List[Any] = intermediate_size
_UpperCamelCase : List[Any] = hidden_act
_UpperCamelCase : Optional[Any] = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : int = type_sequence_label_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[Any] = mask_ratio
_UpperCamelCase : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded up
# (we add 1 for the [CLS] token)
_UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCamelCase : int = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : int = None
if self.use_labels:
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_UpperCamelCase : Dict = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=lowerCamelCase__,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Dict ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = ViTMAEModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCamelCase : Any = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : int ):
'''simple docstring'''
_UpperCamelCase : int = ViTMAEForPreTraining(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCamelCase : List[str] = model(lowerCamelCase__ )
_UpperCamelCase : List[str] = (self.image_size // self.patch_size) ** 2
_UpperCamelCase : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_UpperCamelCase : int = 1
_UpperCamelCase : Dict = ViTMAEForPreTraining(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCamelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : str = model(lowerCamelCase__ )
_UpperCamelCase : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[int] = config_and_inputs
_UpperCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( lowercase , lowercase , unittest.TestCase ):
lowercase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowercase__ = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_UpperCamelCase : List[str] = ViTMAEModelTester(self )
_UpperCamelCase : List[str] = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ,hidden_size=37 )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_UpperCamelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ ,nn.Linear ) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : str = model_class(lowerCamelCase__ )
_UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : int = [*signature.parameters.keys()]
_UpperCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,lowerCamelCase__ )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
# make masks reproducible
np.random.seed(2 )
_UpperCamelCase : int = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_UpperCamelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_UpperCamelCase : Any = torch.from_numpy(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_UpperCamelCase : Optional[Any] = pt_noise
super().check_pt_tf_models(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_UpperCamelCase : Tuple = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
_UpperCamelCase : Any = outputs[0].cpu().numpy()
_UpperCamelCase : List[str] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ )
_UpperCamelCase : Any = model_class.from_pretrained(lowerCamelCase__ )
model.to(lowerCamelCase__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_UpperCamelCase : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
# Make sure we don't have nans
_UpperCamelCase : str = after_outputs[0].cpu().numpy()
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : Dict = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ ,1E-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : int = ViTMAEModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def A__ ( ):
_UpperCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_UpperCamelCase : str = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(lowerCamelCase__ )
_UpperCamelCase : Any = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : List[str] = image_processor(images=lowerCamelCase__ ,return_tensors='pt' ).to(lowerCamelCase__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_UpperCamelCase : Any = ViTMAEConfig()
_UpperCamelCase : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_UpperCamelCase : Any = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_UpperCamelCase : Any = model(**lowerCamelCase__ ,noise=torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ ) )
# verify the logits
_UpperCamelCase : str = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase__ )
_UpperCamelCase : List[str] = torch.tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(lowerCamelCase__ ) ,atol=1E-4 ) )
| 83 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value: int, factor: int = 1, **kwargs) -> int:
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        # the input was already prime, so move on to the next one
        return next_prime(value + 1, **kwargs)
    return value
| 346 | 0 |
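Quick sanity checks for the two helpers above, under the names restored in the cleaned-up version (is_prime, next_prime):

assert is_prime(2) and is_prime(3) and is_prime(29)
assert not is_prime(0) and not is_prime(1) and not is_prime(33)
print(next_prime(14))  # 17: counts up from the composite 14
print(next_prime(5))   # 7: a prime input recurses, so the result is strictly larger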
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)
    timesteps: Optional[jnp.ndarray] = None

    @classmethod
    def create(cls) -> "KarrasVeSchedulerState":
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self) -> bool:
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self) -> KarrasVeSchedulerState:
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
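step() and step_correct() above are the predictor and corrector halves of Heun's second-order update from Karras et al. (2022); reading the formulas directly off the code, with x_hat the churned sample at noise level sigma_hat:

    x0 = x_hat + sigma_hat * model_output             (denoised estimate)
    d = (x_hat - x0) / sigma_hat                      (derivative at sigma_hat)
    x_prev = x_hat + (sigma_prev - sigma_hat) * d     (Euler predictor)

The corrector then recomputes the derivative d' at x_prev using sigma_prev and averages the two, x_prev = x_hat + (sigma_prev - sigma_hat) * (d + d') / 2, which is exactly what step_correct() returns.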
| 356 |

def split(string: str, separator: str = " ") -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 206 | 0 |
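Usage of the split helper above, under the restored names:

print(split("apple#banana#cherry", "#"))  # ['apple', 'banana', 'cherry']
print(split("Hello there"))               # ['Hello', 'there']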
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 22 |
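The filter above reduces to one compiled regex over git's file list. A self-contained illustration with a hypothetical file list (the directory names mirror the script's usual arguments):

import re

joined_dirs = "|".join(["utils", "src", "tests"])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
files = ["src/transformers/modeling_bert.py", "docs/index.rst", "tests/test_tokenization.py"]
print([f for f in files if regex.match(f)])
# ['src/transformers/modeling_bert.py', 'tests/test_tokenization.py']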
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( UpperCamelCase__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = OpenAIGPTTokenizer
UpperCamelCase__ = OpenAIGPTTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = False
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__: int = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
UpperCamelCase__: List[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
UpperCamelCase__: Tuple = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
UpperCamelCase__: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def UpperCAmelCase_ ( self: List[Any] , __lowerCamelCase: Union[str, Any] ):
'''simple docstring'''
return "lower newer", "lower newer"
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
UpperCamelCase__: Any = "lower"
UpperCamelCase__: int = ["low", "er</w>"]
UpperCamelCase__: Optional[Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase__: List[str] = tokens + ["<unk>"]
UpperCamelCase__: Tuple = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def UpperCAmelCase_ ( self: str , __lowerCamelCase: str=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCamelCase__: List[str] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
# Simple input
UpperCamelCase__: Union[str, Any] = "This is a simple input"
UpperCamelCase__: List[Any] = ["This is a simple input 1", "This is a simple input 2"]
UpperCamelCase__: Any = ("This is a simple input", "This is a pair")
UpperCamelCase__: Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" )
# Simple input
self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" )
# Simple input
self.assertRaises(
__lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" , )
# Pair input
self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" )
# Pair input
self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" )
# Pair input
self.assertRaises(
__lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" , )
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class _a ( UpperCamelCase__):
"""simple docstring"""
pass
| 149 | 0 |
"""simple docstring"""
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
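A small end-to-end run of what appears to be the Nagel-Schreckenberg cellular-automaton model above, with deterministic settings (no random slowdowns) so the output is stable:

highway = construct_highway(number_of_cells=21, frequency=3, initial_speed=0)
history = simulate(highway, number_of_update=2, probability=0.0, max_speed=5)
print(len(history))  # 3: the initial lane plus one entry per update
print(history[-1])   # cars have accelerated and moved forward, limited by the gaps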
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 205 | 0 |
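Typical use of the hide() context manager above, e.g. around a progress loop; the cursor is restored even if the body raises:

import time

with hide():
    for step in range(3):
        print(f"working... {step}")
        time.sleep(0.5)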
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
UpperCAmelCase_ = trt.Logger(trt.Logger.WARNING)
UpperCAmelCase_ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
UpperCAmelCase_ = logging.getLogger(__name__)
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_8_4,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_2_8,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=2_0,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=3_0,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for the preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
UpperCAmelCase_ = parser.parse_args()
if args.tokenizer_name:
UpperCAmelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
UpperCAmelCase_ = args.per_device_eval_batch_size
UpperCAmelCase_ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
UpperCAmelCase_ = True
UpperCAmelCase_ = 'temp_engine/bert-fp32.engine'
if args.fpaa:
UpperCAmelCase_ = 'temp_engine/bert-fp16.engine'
if args.inta:
UpperCAmelCase_ = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
UpperCAmelCase_ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
UpperCAmelCase_ = [network.get_input(i) for i in range(network.num_inputs)]
UpperCAmelCase_ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
UpperCAmelCase_ = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
UpperCAmelCase_ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
UpperCAmelCase_ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
UpperCAmelCase__ = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
UpperCAmelCase__ = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , SCREAMING_SNAKE_CASE__ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , SCREAMING_SNAKE_CASE__ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , SCREAMING_SNAKE_CASE__ )
# start time
UpperCAmelCase__ = time.time()
# Run inference
context.execute_async(
bindings=[int(SCREAMING_SNAKE_CASE__ ) for d_inp in d_inputs] + [int(SCREAMING_SNAKE_CASE__ ), int(SCREAMING_SNAKE_CASE__ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Synchronize the stream and take time
stream.synchronize()
# end time
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = end_time - start_time
UpperCAmelCase__ = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
UpperCAmelCase_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCAmelCase_ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
UpperCAmelCase_ = raw_datasets['validation'].column_names
UpperCAmelCase_ = 'question' if 'question' in column_names else column_names[0]
UpperCAmelCase_ = 'context' if 'context' in column_names else column_names[1]
UpperCAmelCase_ = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
UpperCAmelCase_ = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
UpperCAmelCase_ = min(args.max_seq_length, tokenizer.model_max_length)
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
UpperCAmelCase__ = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
UpperCAmelCase__ = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] ,
        examples[context_column_name if pad_on_right else question_column_name] ,
        truncation="""only_second""" if pad_on_right else """only_first""" ,
        max_length=SCREAMING_SNAKE_CASE__ ,
        stride=args.doc_stride ,
        return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,
        return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,
        padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
UpperCAmelCase__ = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
UpperCAmelCase__ = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
UpperCAmelCase__ = tokenized_examples.sequence_ids(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
UpperCAmelCase__ = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
UpperCAmelCase__ = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
UpperCAmelCase_ = raw_datasets['validation']
# Validation Feature Creation
UpperCAmelCase_ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
UpperCAmelCase_ = default_data_collator
UpperCAmelCase_ = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
UpperCAmelCase_ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]="eval" ):
'''simple docstring'''
UpperCAmelCase__ = postprocess_qa_predictions(
examples=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , predictions=SCREAMING_SNAKE_CASE__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=SCREAMING_SNAKE_CASE__ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
UpperCAmelCase__ = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
UpperCAmelCase__ = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
UpperCAmelCase__ = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=SCREAMING_SNAKE_CASE__ , label_ids=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
return trt.volume(engine.get_binding_shape(SCREAMING_SNAKE_CASE__ ) ) * engine.get_binding_dtype(SCREAMING_SNAKE_CASE__ ).itemsize
# Allocate device memory for inputs and outputs.
UpperCAmelCase_ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
UpperCAmelCase_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
UpperCAmelCase_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
UpperCAmelCase_ = cuda.mem_alloc(h_outputa.nbytes)
UpperCAmelCase_ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
UpperCAmelCase_ = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 0
UpperCAmelCase_ = timeit.default_timer()
UpperCAmelCase_ = None
for step, batch in enumerate(eval_dataloader):
UpperCAmelCase_ , UpperCAmelCase_ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
UpperCAmelCase_ , UpperCAmelCase_ = outputs
UpperCAmelCase_ = torch.tensor(start_logits)
UpperCAmelCase_ = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
UpperCAmelCase_ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
UpperCAmelCase_ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
UpperCAmelCase_ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
UpperCAmelCase_ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
if all_preds is not None:
UpperCAmelCase_ = nested_truncate(all_preds, len(eval_dataset))
UpperCAmelCase_ = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0))
logger.info('Total Number of Inference = %d', niter)
UpperCAmelCase_ = post_processing_function(eval_examples, eval_dataset, all_preds)
UpperCAmelCase_ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
| 346 |
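The validation-feature function above relies on the fast tokenizers' sliding-window chunking. A hedged standalone sketch of that mechanism (the checkpoint name is only an example; exact feature counts depend on the tokenizer):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-uncased")
enc = tok(
    "What is TensorRT?",  # question
    "TensorRT is an SDK for high-performance deep learning inference. " * 40,  # long context
    truncation="only_second",
    max_length=64,
    stride=16,
    return_overflowing_tokens=True,
    return_offsets_mapping=True,
    padding="max_length",
)
print(len(enc["input_ids"]))              # several overlapping 64-token features
print(enc["overflow_to_sample_mapping"])  # each feature maps back to example 0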
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : List[str] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 346 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Tuple ) ->None:
"""simple docstring"""
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
| 353 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger("transformers.models.speecht5")
def _a ( a :Optional[Any] , a :Tuple , a :Dict ) -> List[str]:
hf_model.apply_weight_norm()
a = checkpoint['''input_conv.weight_g''']
a = checkpoint['''input_conv.weight_v''']
a = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
a = checkpoint[F"""upsamples.{i}.1.weight_g"""]
a = checkpoint[F"""upsamples.{i}.1.weight_v"""]
a = checkpoint[F"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
a = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
a = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
a = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
a = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
a = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
a = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
a = checkpoint['''output_conv.1.weight_g''']
a = checkpoint['''output_conv.1.weight_v''']
a = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def _a ( a :List[str] , a :Union[str, Any] , a :Dict , a :Dict=None , a :List[Any]=None , ) -> int:
if config_path is not None:
a = SpeechTaHifiGanConfig.from_pretrained(a )
else:
a = SpeechTaHifiGanConfig()
a = SpeechTaHifiGan(a )
a = torch.load(a )
load_weights(orig_checkpoint['''model''']['''generator'''] , a , a )
a = np.load(a )
a = stats[0].reshape(-1 )
a = stats[1].reshape(-1 )
a = torch.from_numpy(a ).float()
a = torch.from_numpy(a ).float()
model.save_pretrained(a )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCAmelCase__ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 26 | 0 |
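The converter above calls apply_weight_norm() before copying the checkpoint's weight_g / weight_v tensors and remove_weight_norm() afterwards. A minimal sketch of PyTorch's weight-norm reparameterization, which is where those parameter names come from:

import torch
from torch import nn

conv = nn.Conv1d(4, 8, kernel_size=3)
conv = nn.utils.weight_norm(conv)  # reparameterize weight as g * v / ||v||
print(sorted(n for n, _ in conv.named_parameters()))
# ['bias', 'weight_g', 'weight_v']  -> the names the checkpoint stores
nn.utils.remove_weight_norm(conv)  # fold g and v back into a plain weight
print(sorted(n for n, _ in conv.named_parameters()))
# ['bias', 'weight']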
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
SCREAMING_SNAKE_CASE__ : Dict = logging.getLogger(__name__)
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = """sequence-classification"""
def __init__( self , UpperCamelCase__ ) -> List[Any]:
if type(UpperCamelCase__ ) == dict:
lowerCamelCase : int = Namespace(**UpperCamelCase__ )
lowerCamelCase : str = glue_output_modes[hparams.task]
lowerCamelCase : int = glue_tasks_num_labels[hparams.task]
super().__init__(UpperCamelCase__ , UpperCamelCase__ , self.mode )
def _lowercase ( self , **UpperCamelCase__ ) -> Tuple:
return self.model(**UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
lowerCamelCase : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
lowerCamelCase : Optional[int] = self(**UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = outputs[0]
lowerCamelCase : str = self.trainer.lr_schedulers[0]["scheduler"]
lowerCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _lowercase ( self ) -> str:
lowerCamelCase : Any = self.hparams
lowerCamelCase : Union[str, Any] = processors[args.task]()
lowerCamelCase : Optional[int] = processor.get_labels()
for mode in ["train", "dev"]:
lowerCamelCase : Optional[Any] = self._feature_file(UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , UpperCamelCase__ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowerCamelCase : List[str] = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
lowerCamelCase : Dict = convert_examples_to_features(
UpperCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , UpperCamelCase__ )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader:
lowerCamelCase : str = "dev" if mode == "test" else mode
lowerCamelCase : int = self._feature_file(UpperCamelCase__ )
logger.info("Loading features from cached file %s" , UpperCamelCase__ )
lowerCamelCase : str = torch.load(UpperCamelCase__ )
lowerCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowerCamelCase : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowerCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
lowerCamelCase : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCamelCase : Tuple = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
lowerCamelCase : Dict = self(**UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : Any = outputs[:2]
lowerCamelCase : Union[str, Any] = logits.detach().cpu().numpy()
lowerCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowercase ( self , UpperCamelCase__ ) -> tuple:
lowerCamelCase : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
lowerCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
lowerCamelCase : Union[str, Any] = np.argmax(UpperCamelCase__ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowerCamelCase : str = np.squeeze(UpperCamelCase__ )
lowerCamelCase : List[Any] = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowerCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase : Dict = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase__ , UpperCamelCase__ )}
lowerCamelCase : List[str] = dict(results.items() )
lowerCamelCase : Optional[int] = results
return ret, preds_list, out_label_list
def _lowercase ( self , UpperCamelCase__ ) -> dict:
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._eval_end(UpperCamelCase__ )
lowerCamelCase : str = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowercase ( self , UpperCamelCase__ ) -> dict:
lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._eval_end(UpperCamelCase__ )
lowerCamelCase : str = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ )
parser.add_argument(
"--max_seq_length" , default=128 , type=UpperCamelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def A ( ) -> int:
lowerCamelCase : int = argparse.ArgumentParser()
add_generic_args(_SCREAMING_SNAKE_CASE ,os.getcwd() )
lowerCamelCase : str = GLUETransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE ,os.getcwd() )
lowerCamelCase : str = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowerCamelCase : int = os.path.join(
"./results" ,f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' ,)
os.makedirs(args.output_dir )
lowerCamelCase : int = GLUETransformer(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Dict = generic_train(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowerCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=_SCREAMING_SNAKE_CASE ) )
lowerCamelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 48 | 1 |
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _lowerCAmelCase :
'''simple docstring'''
pass | 363 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['image_processing_efficientformer'] = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_efficientformer'] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_efficientformer'] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 270 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ):
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_2( self ):
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
    def test_stable_diffusion_karras_sigma( self ):
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_dpmpp_2m' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 42 |
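All three integration tests above share one idiom: seed the generator, render, then compare a 3x3 corner slice of the last channel against recorded reference values. A minimal sketch of that comparison helper (name and default tolerance are illustrative):
import numpy as np

def assert_image_slice_close(image: np.ndarray, expected: np.ndarray, tol: float = 1e-2) -> None:
    # Compare only the bottom-right 3x3 patch of the last channel, as the
    # tests above do; a full-image comparison would be far more brittle.
    actual = image[0, -3:, -3:, -1].flatten()
    assert np.abs(actual - expected).max() < tol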
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A ( self : int ):
'''simple docstring'''
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids )['last_hidden_state']
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) ) | 282 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=12 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , hidden_size=32 , projection_dim=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=5_12 , initializer_range=0.0_2 , bos_token_id=0 , scope=None , ) -> Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ) -> str:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size , seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(rnd_start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
    def get_config( self ) -> Optional[int]:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model( self , config , input_ids , input_mask ) -> Dict:
        model = TFBlipTextModel(config=config )
        result = model(input_ids , attention_mask=input_mask , training=False )
        result = model(input_ids , training=False )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class lowerCamelCase__ ( TFModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False
    def setUp( self ) -> List[str]:
        self.model_tester = BlipTextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlipTextConfig , hidden_size=37 )
    def test_config( self ) -> Tuple:
        self.config_tester.run_common_tests()
    def test_model( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_training( self ) -> Tuple:
        pass
    def test_training_gradient_checkpointing( self ) -> Optional[Any]:
        pass
    @unittest.skip(reason="""Blip does not use inputs_embeds""" )
    def test_inputs_embeds( self ) -> Dict:
        pass
    @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
    def test_save_load_fast_init_from_base( self ) -> int:
        pass
    @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
    def test_save_load_fast_init_to_base( self ) -> int:
        pass
    @slow
    def test_model_from_pretrained( self ) -> Any:
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_pt_tf_model_equivalence( self , allow_missing_keys=True ) -> List[Any]:
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys )
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase_( ABC ):
'''simple docstring'''
@staticmethod
@abstractmethod
    def register_subcommand ( parser: ArgumentParser ) -> Optional[Any]:
raise NotImplementedError()
@abstractmethod
    def run ( self ) -> int:
raise NotImplementedError()
| 37 |
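A hypothetical concrete subclass shows how the two abstract hooks are meant to be filled in; the command name, flag, and printed text are illustrative, not taken from the source:
from argparse import ArgumentParser

class EnvCommand(lowerCAmelCase_):
    """Illustrative concrete command built on the abstract base above."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        # Real implementations register a sub-parser and a factory here.
        parser.add_argument("--verbose", action="store_true")

    def run(self) -> None:
        print("collect and print environment info here")

EnvCommand.register_subcommand(ArgumentParser())
EnvCommand().run()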
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding="""utf-8""" ,check=__UpperCAmelCase ,)
assert hasattr(self ,"""env""" )
    def create_estimator( self ,instance_count ) -> Optional[Any]:
        job_name = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
        # distributed data settings
        distribution = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=job_name ,instance_count=instance_count ,instance_type=self.instance_type ,debugger_hook_config=False ,hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=distribution ,py_version="""py36""" ,)
    def save_results_as_csv( self ,job_name ) -> Optional[Any]:
        TrainingJobAnalytics(job_name ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
    def test_script( self ,instance_count ) -> Any:
        # create estimator
        estimator = self.create_estimator(instance_count )
# run training
estimator.fit()
# result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,99_9999 )
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,"""w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,__UpperCAmelCase )
| 37 | 1 |
def print_pascal_triangle ( num_rows: int ) -> None:
    '''simple docstring'''
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=""" """ )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] ,end=""" """ )
            else:
                print(triangle[row_idx][col_idx] ,end="""""" )
        print()
def generate_pascal_triangle ( num_rows: int ) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows ,int ):
        raise TypeError("""The input value of 'num_rows' should be 'int'""" )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""" )
    triangle = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle ,current_row_idx )
        triangle.append(current_row )
    return triangle
def populate_current_row ( triangle: list[list[int]] ,current_row_idx: int ) -> list[int]:
    '''simple docstring'''
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1 ,current_row_idx ):
        calculate_current_element(
            triangle ,current_row ,current_row_idx ,current_col_idx )
    return current_row
def calculate_current_element ( triangle: list[list[int]] ,current_row: list[int] ,current_row_idx: int ,current_col_idx: int ,) -> None:
    '''simple docstring'''
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized ( num_rows: int ) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows ,int ):
        raise TypeError("""The input value of 'num_rows' should be 'int'""" )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""" )
    result = [[1]]
    for row_index in range(1 ,num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length ,2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result
def benchmark ( ) -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable ,value: int ) -> None:
        call = F"""{func.__name__}({value})"""
        timing = timeit(F"""__main__.{call}""" ,setup="""import __main__""" )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F"""{call:38} -- {timing:.4f} seconds""" )
    for value in range(15 ): # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func ,value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 277 |
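A quick usage check, relying only on the functions defined in this row; the expected output in the comments follows directly from the definition of the triangle:
# Both construction strategies should agree on every size.
for n in range(10):
    assert generate_pascal_triangle(n) == generate_pascal_triangle_optimized(n)
print_pascal_triangle(5)
# Expected output:
#     1
#    1 1
#   1 2 1
#  1 3 3 1
# 1 4 6 4 1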
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowercase ( snake_case__):
"""simple docstring"""
a__ : str = ["vqvae"]
def __init__( self : List[Any] , __UpperCAmelCase : AutoencoderKL , __UpperCAmelCase : UNetaDConditionModel , __UpperCAmelCase : Mel , __UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> str:
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , mel=__UpperCAmelCase , vqvae=__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 1_000
@torch.no_grad()
def __call__( self : List[Any] , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = None , __UpperCAmelCase : np.ndarray = None , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = None , __UpperCAmelCase : torch.Generator = None , __UpperCAmelCase : float = 0 , __UpperCAmelCase : float = 0 , __UpperCAmelCase : torch.Generator = None , __UpperCAmelCase : float = 0 , __UpperCAmelCase : torch.Tensor = None , __UpperCAmelCase : torch.Tensor = None , __UpperCAmelCase : Union[str, Any]=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
UpperCAmelCase_= steps or self.get_default_steps()
self.scheduler.set_timesteps(__UpperCAmelCase )
UpperCAmelCase_= step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
UpperCAmelCase_= (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
UpperCAmelCase_= randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__UpperCAmelCase , device=self.device , )
UpperCAmelCase_= noise
UpperCAmelCase_= None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_= self.mel.audio_slice_to_image(__UpperCAmelCase )
UpperCAmelCase_= np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
UpperCAmelCase_= (input_image / 255) * 2 - 1
UpperCAmelCase_= torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
UpperCAmelCase_= self.vqvae.encode(torch.unsqueeze(__UpperCAmelCase , 0 ) ).latent_dist.sample(
generator=__UpperCAmelCase )[0]
UpperCAmelCase_= self.vqvae.config.scaling_factor * input_images
if start_step > 0:
UpperCAmelCase_= self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , self.scheduler.timesteps[start_step - 1] )
UpperCAmelCase_= (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
UpperCAmelCase_= int(mask_start_secs * pixels_per_second )
UpperCAmelCase_= int(mask_end_secs * pixels_per_second )
UpperCAmelCase_= self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UNetaDConditionModel ):
UpperCAmelCase_= self.unet(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )["""sample"""]
else:
UpperCAmelCase_= self.unet(__UpperCAmelCase , __UpperCAmelCase )["""sample"""]
            if isinstance(self.scheduler , DDIMScheduler ):
UpperCAmelCase_= self.scheduler.step(
model_output=__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , )["""prev_sample"""]
else:
UpperCAmelCase_= self.scheduler.step(
model_output=__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase , )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
UpperCAmelCase_= mask[:, step, :, :mask_start]
if mask_end > 0:
UpperCAmelCase_= mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
UpperCAmelCase_= 1 / self.vqvae.config.scaling_factor * images
UpperCAmelCase_= self.vqvae.decode(__UpperCAmelCase )["""sample"""]
UpperCAmelCase_= (images / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_= images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
UpperCAmelCase_= (images * 255).round().astype("""uint8""" )
UpperCAmelCase_= list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode="""RGB""" ).convert("""L""" ) for _ in images) )
UpperCAmelCase_= [self.mel.image_to_audio(__UpperCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__UpperCAmelCase ) )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : List[Image.Image] , __UpperCAmelCase : int = 50 ) -> np.ndarray:
        assert isinstance(self.scheduler , DDIMScheduler )
self.scheduler.set_timesteps(__UpperCAmelCase )
UpperCAmelCase_= np.array(
[np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
UpperCAmelCase_= (sample / 255) * 2 - 1
UpperCAmelCase_= torch.Tensor(__UpperCAmelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
UpperCAmelCase_= t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
UpperCAmelCase_= self.scheduler.alphas_cumprod[t]
UpperCAmelCase_= (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
UpperCAmelCase_= 1 - alpha_prod_t
UpperCAmelCase_= self.unet(__UpperCAmelCase , __UpperCAmelCase )["""sample"""]
UpperCAmelCase_= (1 - alpha_prod_t_prev) ** 0.5 * model_output
UpperCAmelCase_= (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
UpperCAmelCase_= sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def _SCREAMING_SNAKE_CASE ( xa : torch.Tensor , xb : torch.Tensor , alpha : float ) -> torch.Tensor:
        theta = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
| 277 | 1 |
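The static method at the end of the pipeline implements spherical linear interpolation (slerp) between two noise tensors. A standalone sketch of the same formula in NumPy, useful for sanity-checking outside of torch (the function name is an assumption; the pipeline's version is anonymous above):
import numpy as np

def slerp(x0: np.ndarray, x1: np.ndarray, alpha: float) -> np.ndarray:
    # sin((1-a)*theta)/sin(theta) * x0 + sin(a*theta)/sin(theta) * x1,
    # where theta is the angle between the flattened tensors x0 and x1.
    theta = np.arccos(np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1)))
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)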
import math
class SelfOrganizingMap :
'''simple docstring'''
    def get_winner(self ,weights ,sample ) -> int:
        """simple docstring"""
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) ,2 )
            db += math.pow((sample[i] - weights[1][i]) ,2 )
        return 0 if da > db else 1
    def update(self ,weights ,sample ,j ,alpha ) -> list[list[int | float]]:
        """simple docstring"""
        for i in range(len(sample ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main ( ):
    '''simple docstring'''
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights ,sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights ,sample ,winner ,alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights ,sample)
    # results
    print(f"""Clusters that the test sample belongs to : {winner}""")
    print(f"""Weights that have been trained : {weights}""")
# running the main() function
if __name__ == "__main__":
main()
| 129 |
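One hand-computed step clarifies the learning rule the `update` method applies, `w <- w + alpha * (x - w)`; the numbers below are illustrative:
# Worked example of a single winner-take-all weight update:
# w = 0.2, x = 1, alpha = 0.5  ->  w' = 0.2 + 0.5 * (1 - 0.2) = 0.6
w, x, alpha = 0.2, 1, 0.5
assert abs((w + alpha * (x - w)) - 0.6) < 1e-9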
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys ( flax_key_tuple ,flax_tensor ):
    '''simple docstring'''
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = torch.permute(flax_tensor ,(0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict ( layer ,checkpoint_info ,switch_checkpoint_path ):
    '''simple docstring'''
    if "metadata" in layer:
        split_layer = layer.split('''metadata''')
        curr_real_layer_name = ''''''.join(split_layer[0])[:-1]
        split_layer = [tuple(('''metadata''' + split_layer[1]).split('''/'''))]
    elif "kvstore" in layer:
        split_layer = layer.split('''kvstore''')
        curr_real_layer_name = ''''''.join(split_layer[0])[:-1]
        split_layer = [tuple(('''kvstore''' + split_layer[1]).split('''/'''))]
    else:
        split_layer = layer.split('''/''')
        curr_real_layer_name = '''/'''.join(split_layer[:-1])
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = '''file'''
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block ( current_block ,save_path ):
    '''simple docstring'''
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k] = v
    current_block = new_current_block
    torch.save(current_block ,save_path)
def shard_on_the_fly ( switch_checkpoint_path ,dump_path ,max_shard_size ,dtype ,weights_name : str = WEIGHTS_NAME):
    '''simple docstring'''
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path ,exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' ,'''rb''') as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())['''optimizer''']['''target''']
        checkpoint_info = flatten_dict(checkpoint_info ,sep='''/''')
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer ,checkpoint_info ,switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key , raw_weights = rename_base_flax_keys(tuple(key.split('''/''')) ,raw_weights)
        key = '''/'''.join(key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path ,weights_name.replace('''.bin''' ,f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
            rename_and_save_block(current_block ,save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch ,dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path ,weights_name.replace('''.bin''' ,f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
    rename_and_save_block(current_block ,save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            '''.bin''' ,f"""-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin""")
        temp_filename = os.path.join(dump_path ,weights_name.replace('''.bin''' ,f"""-{idx+1:05d}-of-???.bin"""))
        os.rename(temp_filename ,os.path.join(dump_path ,shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path ,WEIGHTS_INDEX_NAME) ,'''w''' ,encoding='''utf-8''') as f:
        content = json.dumps(index ,indent=2 ,sort_keys=True) + '''\n'''
        f.write(content)
    return metadata, index
if __name__ == "__main__":
__snake_case : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__snake_case : Dict =parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check ( ):
    '''simple docstring'''
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
    config = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''')
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''')
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''' ,device_map='''auto''')
    tokenizer = TaTokenizer.from_pretrained('''t5-small''')
    text = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
    input_ids = tokenizer(text ,return_tensors='''pt''').input_ids
    out = model.generate(input_ids ,decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 129 | 1 |
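The sharding loop above boils down to "accumulate tensors until the next one would cross max_shard_size, then flush". A minimal sketch of that greedy split over plain byte counts (the function and variable names are hypothetical):
def plan_shards(sizes: list[int], max_shard_size: int) -> list[list[int]]:
    # Greedy split mirroring the logic above: start a new shard whenever
    # adding the next tensor would exceed the byte budget.
    shards, current, current_size = [], [], 0
    for size in sizes:
        if current and current_size + size > max_shard_size:
            shards.append(current)
            current, current_size = [], 0
        current.append(size)
        current_size += size
    if current:
        shards.append(current)
    return shards

assert plan_shards([4, 4, 4], 8) == [[4, 4], [4]]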
from __future__ import annotations
from math import gcd
def pollard_rho ( num: int , seed: int = 2 , step: int = 1 , attempts: int = 3 , ) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int , step: int , modulus: int ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
lowerCAmelCase__ : Tuple =argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
lowerCAmelCase__ : List[Any] =parser.parse_args()
    divisor =pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient =args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
| 118 |
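Quick usage of the function above; 8051 = 83 x 97 is a classic Pollard's rho example, and the check is written so it also passes in the unlucky case where all attempts fail and None is returned:
# Any returned value must be a nontrivial divisor of the input.
factor = pollard_rho(8051)
assert factor is None or 8051 % factor == 0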
from __future__ import annotations
from collections.abc import Generator
def sieve ( ) -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution ( limit: float = 1E10 ) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes )
        n += 2
if __name__ == "__main__":
print(solution())
| 118 | 1 |
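The incremental sieve above never builds a fixed-size table, so it can be consumed lazily; the first few primes are a cheap sanity check on the generator:
from itertools import islice

# sieve() is the generator defined above.
assert list(islice(sieve(), 8)) == [2, 3, 5, 7, 11, 13, 17, 19]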
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase_ : Tuple = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class SCREAMING_SNAKE_CASE__ ( PipelineTool ):
snake_case__ : str = '''facebook/nllb-200-distilled-600M'''
snake_case__ : Union[str, Any] = (
'''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
'''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
'''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
snake_case__ : Optional[Any] = '''translator'''
snake_case__ : Tuple = AutoTokenizer
snake_case__ : Union[str, Any] = AutoModelForSeqaSeqLM
snake_case__ : Dict = LANGUAGE_CODES
snake_case__ : str = ['''text''', '''text''', '''text''']
snake_case__ : Tuple = ['''text''']
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
if src_lang not in self.lang_to_code:
raise ValueError(F"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(F"""{tgt_lang} is not a supported language.""" )
a_ : str = self.lang_to_code[src_lang]
a_ : Any = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
SCREAMING_SNAKE_CASE__ , return_tensors='pt' , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any:
return self.model.generate(**SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
| 32 |
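The tool's encode/forward/decode triplet mirrors plain seq2seq generation. A hedged sketch of the same flow directly against the underlying model classes; the checkpoint string comes from the class above, and the rest is the standard transformers API for NLLB:
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
inputs = tokenizer("Hello, world!", return_tensors="pt")
# forced_bos_token_id steers generation into the target language.
out = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))
print(tokenizer.decode(out[0], skip_special_tokens=True))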
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left (sorted_collection , item , lo = 0 , hi = -1 ):
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right (sorted_collection , item , lo = 0 , hi = -1 ):
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left (sorted_collection , item , lo = 0 , hi = -1 ):
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right (sorted_collection , item , lo = 0 , hi = -1 ):
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search (sorted_collection , item ):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib (sorted_collection , item ):
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion (sorted_collection , item , left , right ):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = sorted(int(item) for item in user_input.split(""","""))
    target = int(input("""Enter a single number to be found in the list:\n"""))
    result = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.') | 86 | 0 |
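bisect_left and bisect_right above differ only in `<` versus `<=`; with duplicates that becomes the left and right edge of the run. A tiny check against the functions defined above:
# With duplicates, bisect_left returns the first slot holding the value and
# bisect_right the slot just past the last one.
data = [1, 2, 2, 2, 3]
assert bisect_left(data, 2) == 1
assert bisect_right(data, 2) == 4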
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
if not isinstance(a_ , a_ ):
__A = F'''Input value of [number={number}] must be an integer'''
raise TypeError(a_ )
if number < 1:
__A = F'''Input value of [number={number}] must be > 0'''
raise ValueError(a_ )
__A = 1
for i in range(1 , a_ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
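The loop above is the iterative form of the Catalan recurrence C(n) = C(n-1) * 2(2n-1)/(n+1), 1-indexed; a quick check against the first known values (the function keeps its generated name `UpperCAmelCase` in this row):
# 1-indexed Catalan numbers: 1, 1, 2, 5, 14, ...
assert [UpperCAmelCase(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]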
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Union[str, Any] = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class UpperCAmelCase ( PretrainedConfig ):
'''simple docstring'''
snake_case_ = "bloom"
snake_case_ = ["past_key_values"]
snake_case_ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
    def __init__( self ,vocab_size=25_08_80 ,hidden_size=64 ,n_layer=2 ,n_head=8 ,layer_norm_epsilon=1E-5 ,initializer_range=0.02 ,use_cache=True ,bos_token_id=1 ,eos_token_id=2 ,apply_residual_connection_post_layernorm=False ,hidden_dropout=0.0 ,attention_dropout=0.0 ,pretraining_tp=1 ,slow_but_exact=False ,**kwargs ,):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed" ,None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
class UpperCAmelCase ( OnnxConfigWithPast ):
'''simple docstring'''
snake_case_ = version.parse("1.12" )
    def __init__( self ,config : PretrainedConfig ,task : str = "default" ,patching_specs : List[PatchingSpec] = None ,use_past : bool = False ,):
        super().__init__(config ,task=task ,patching_specs=patching_specs ,use_past=use_past )
        if not getattr(self._config ,"pad_token_id" ,None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(A ,direction="inputs" ,inverted_values_shape=A )
__A = {0: "batch", 1: "past_sequence + sequence"}
else:
__A = {0: "batch", 1: "sequence"}
return common_inputs
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return self._config.n_layer
@property
def UpperCamelCase_ ( self : List[Any] ):
return self._config.n_head
@property
def UpperCamelCase_ ( self : Optional[int] ):
return 1E-3
def UpperCamelCase_ ( self : Any ,A : "PreTrainedTokenizer" ,A : int = -1 ,A : int = -1 ,A : bool = False ,A : Optional["TensorType"] = None ,):
__A = super(A ,self ).generate_dummy_inputs(
A ,batch_size=A ,seq_length=A ,is_pair=A ,framework=A )
# We need to order the input in the way they appears in the forward()
__A = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__A , __A = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__A = seqlen + 2
__A = self._config.hidden_size // self.num_attention_heads
__A = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
__A = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
__A = [
(torch.zeros(A ), torch.zeros(A )) for _ in range(self.num_layers )
]
__A = common_inputs["attention_mask"]
if self.use_past:
__A = ordered_inputs["attention_mask"].dtype
__A = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(A ,A ,dtype=A )] ,dim=1 )
return ordered_inputs
@property
def UpperCamelCase_ ( self : int ):
return 13
| 124 | 0 |
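The dummy past_key_values above encode a BLOOM-specific cache layout: keys are (batch*heads, head_dim, past_len) while values are (batch*heads, past_len, head_dim), i.e. the last two axes are swapped between the two. A tiny shape sketch with illustrative sizes:
import torch

# batch=2, heads=8, head_dim=8, past=5; key and value transpose the last two axes.
batch, heads, head_dim, past = 2, 8, 8, 5
key = torch.zeros(batch * heads, head_dim, past)
value = torch.zeros(batch * heads, past, head_dim)
assert tuple(key.shape[-2:]) == tuple(value.shape[-2:])[::-1]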
import math
def jump_search ( arr: list , x: int ) -> int:
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    x = int(input('''Enter the number to be searched:\n'''))
    res = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(f'''Number {x} is at index {res}''') | 233 |
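Jump search probes the sorted array in sqrt(n)-sized blocks and then scans linearly inside one block, so it costs O(sqrt(n)) comparisons. Quick usage against the function above:
# Returns the index of x, or -1 when x is absent.
sample = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
assert jump_search(sample, 55) == 10
assert jump_search(sample, 4) == -1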
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : int = '''▁'''
lowerCamelCase : Optional[int] = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase : str = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
lowerCamelCase : str = {
'''google/pegasus-xsum''': 5_12,
}
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class lowerCAmelCase ( PreTrainedTokenizer ):
'''simple docstring'''
    _A : Tuple = VOCAB_FILES_NAMES
_A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[int] = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
"""simple docstring"""
__lowercase : Tuple = offset
if additional_special_tokens is not None:
if not isinstance(__a , __a ):
raise TypeError(
F"additional_special_tokens should be of type {type(__a )}, but is"
F" {type(__a )}" )
__lowercase : Dict = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"<unk_{i}>" for i in range(len(__a ) , self.offset - 1 )
]
if len(set(__a ) ) != len(__a ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
__lowercase : Optional[Any] = additional_special_tokens_extended
else:
__lowercase : int = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset )]
__lowercase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__a , unk_token=__a , mask_token=__a , pad_token=__a , mask_token_sent=__a , offset=__a , additional_special_tokens=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
__lowercase : Optional[Any] = mask_token_sent
__lowercase : Dict = vocab_file
__lowercase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__a )
# add special tokens to encoder dict
__lowercase : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
__lowercase : Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.sp_model ) + self.offset
    def get_vocab( self ) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token: str ) -> int:
        """simple docstring"""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset
    def _convert_id_to_token( self , index: int ) -> str:
        """simple docstring"""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
        return token
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def num_special_tokens_to_add( self , pair=False ) -> int:
        """simple docstring"""
        return 1
    def _special_token_mask( self , seq ) -> List[int]:
        """simple docstring"""
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_0: List , token_ids_1: Optional[List] = None , already_has_special_tokens: bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,) | 233 | 1 |
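A small illustration (hypothetical ids, not a real vocabulary) of the offset bookkeeping above: ids below offset are reserved for entries in self.encoder, and every other id maps back to sentencepiece as id - offset.
offset = 103
encoder = {0: "<pad>", 1: "</s>", 2: "<mask_1>", 3: "<mask_2>"}  # illustrative reserved ids
def to_sp_id(token_id: int) -> int:
    assert token_id >= offset, "ids below the offset are reserved special tokens"
    return token_id - offset
assert to_sp_id(103) == 0  # first real sentencepiece piece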
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin :
    def _get_dummy_components( self ):
        torch.manual_seed(0 )
        text_encoder = T5EncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                '''ResnetDownsampleBlock2D''',
                '''SimpleCrossAttnDownBlock2D''',
            ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
        unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components( self ):
        torch.manual_seed(0 )
        text_encoder = T5EncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                '''ResnetDownsampleBlock2D''',
                '''SimpleCrossAttnDownBlock2D''',
            ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
        torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        prompt = inputs['''prompt''']
        generator = inputs['''generator''']
        num_inference_steps = inputs['''num_inference_steps''']
        output_type = inputs['''output_type''']
        if "image" in inputs:
            image = inputs['''image''']
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs['''mask_image''']
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs['''original_image''']
        else:
            original_image = None
        prompt_embeds , negative_prompt_embeds = pipe.encode_prompt(prompt )
        # inputs with prompt converted to embeddings
        inputs = {
            '''prompt_embeds''': prompt_embeds,
            '''negative_prompt_embeds''': negative_prompt_embeds,
            '''generator''': generator,
            '''num_inference_steps''': num_inference_steps,
            '''output_type''': output_type,
        }
        if image is not None:
            inputs['''image'''] = image
        if mask_image is not None:
            inputs['''mask_image'''] = mask_image
        if original_image is not None:
            inputs['''original_image'''] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
            for optional_component in pipe._optional_components:
                self.assertTrue(
                    getattr(pipe_loaded , optional_component ) is None , F"`{optional_component}` did not stay set to None after loading." , )
            inputs = self.get_dummy_inputs(torch_device )
            generator = inputs['''generator''']
            num_inference_steps = inputs['''num_inference_steps''']
            output_type = inputs['''output_type''']
            # inputs with prompt converted to embeddings
            inputs = {
                '''prompt_embeds''': prompt_embeds,
                '''negative_prompt_embeds''': negative_prompt_embeds,
                '''generator''': generator,
                '''num_inference_steps''': num_inference_steps,
                '''output_type''': output_type,
            }
            if image is not None:
                inputs['''image'''] = image
            if mask_image is not None:
                inputs['''mask_image'''] = mask_image
            if original_image is not None:
                inputs['''original_image'''] = original_image
            output_loaded = pipe_loaded(**inputs )[0]
            max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
            self.assertLess(max_diff , 1e-4 )
    def _test_save_load_local( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
            inputs = self.get_dummy_inputs(torch_device )
            output_loaded = pipe_loaded(**inputs )[0]
            max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
            self.assertLess(max_diff , 1e-4 )
| 72 |
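The save/reload checks above reduce everything to one scalar: the maximum absolute difference between outputs. The same pattern in isolation (plain numpy, no diffusers required):
import numpy as np
output = np.random.RandomState(0).randn(1, 32, 32, 3)  # stand-in for a pipeline output
output_loaded = output.copy()                          # a faithful reload reproduces it
max_diff = np.abs(output - output_loaded).max()
assert max_diff < 1e-4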
'''simple docstring'''
MOD_ADLER = 6_5521
def adler_32(plain_text: str) -> int:
    '''simple docstring'''
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 72 | 1 |
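The checksum above can be cross-checked against the standard library; zlib.adler32 computes the same value over bytes (this assumes ASCII input, so ord() matches the byte values):
import zlib
assert adler_32("Wikipedia") == zlib.adler32(b"Wikipedia")  # 0x11E60398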
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('glue', 'mrpc')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:", eval_metric)
        performance_metric[F"epoch-{epoch}"] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), F"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
            json.dump(performance_metric, f)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--performance_lower_bound', type=float, default=None, help='Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.', )
    parser.add_argument(
        '--num_epochs', type=int, default=3, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 29 |
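One detail worth isolating from the training loop above: the loss is divided by gradient_accumulation_steps so that the accumulated backward passes sum to the gradient of the mean loss over the accumulation window. A framework-free sanity check:
losses = [0.5, 0.7, 0.3, 0.9]  # illustrative per-micro-batch losses
grad_accum_steps = len(losses)
scaled_sum = sum(l / grad_accum_steps for l in losses)
assert abs(scaled_sum - sum(losses) / grad_accum_steps) < 1e-12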
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self) -> None:
        """simple docstring"""
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
@require_torch
    def test_5b_lyrics_tokenizer(self) -> None:
        """simple docstring"""
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
| 21 | 0 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        # Clearing the least significant set bit counts one bit per iteration
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f'''Benchmark when {number = }:''')
        print(f'''{get_set_bits_count_using_modulo_operator(number) = }''')
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})" , setup=setup)
        print(f'''timeit() runs in {timing} seconds''')
        print(f'''{get_set_bits_count_using_brian_kernighans_algorithm(number) = }''')
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})" , setup=setup , )
        print(f'''timeit() runs in {timing} seconds''')
    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 180 |
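Worked example for the two counters above: 25 is 0b11001, so both functions should report 3 set bits; bin(n).count("1") is a handy standard-library cross-check.
for n in (25, 37, 58, 0):
    expected = bin(n).count("1")  # e.g. 25 -> '0b11001' -> 3
    assert get_set_bits_count_using_brian_kernighans_algorithm(n) == expected
    assert get_set_bits_count_using_modulo_operator(n) == expected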
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number , int) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class Test(unittest.TestCase ):
    def test_primes( self ) -> None:
        '''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes( self ) -> None:
        '''simple docstring'''
        with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 180 | 1 |
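A brute-force cross-check of the 6k +/- 1 shortcut above (plain trial division over every candidate up to sqrt(n)):
def is_prime_naive(number: int) -> bool:
    # Reference implementation: plain trial division, no wheel optimization.
    if number < 2:
        return False
    return all(number % d != 0 for d in range(2, int(math.sqrt(number)) + 1))
assert all(is_prime(n) == is_prime_naive(n) for n in range(2, 1_000))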
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
    _import_structure["modeling_maskformer_swin"] = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 136 |
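The _LazyModule wiring above defers heavy imports until an attribute is first accessed; a minimal stand-alone sketch of the same idea with module-level __getattr__ (PEP 562), independent of the transformers implementation:
import importlib
_LAZY_ATTRS = {"MaskFormerConfig": ".configuration_maskformer"}  # hypothetical mapping
def __getattr__(name):  # invoked only when `name` is not found in the module
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")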
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), 'per_category_accuracy': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n"
_CITATION = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label , label , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    '''simple docstring'''
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 2_55
        label = label - 1
        label[label == 2_54] = 2_55
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results , gt_seg_maps , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    '''simple docstring'''
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results , gt_seg_maps , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
    '''simple docstring'''
    total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou )
    metrics["mean_accuracy"] = np.nanmean(acc )
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU( datasets.Metric ):
    def _info( self ):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    """predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16"""))),
                    """references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16"""))),
                }) , reference_urls=[
                """https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
            ] , )
    def _compute( self , predictions , references , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
        """simple docstring"""
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
        return iou_result
| 136 | 1 |
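A tiny worked example of the histogram bookkeeping in intersect_and_union above (two classes, 2x2 maps, no ignore_index handling):
import numpy as np
pred = np.array([[0, 1], [1, 1]])
gt = np.array([[0, 0], [1, 1]])
num_labels = 2
matched = pred[pred == gt]  # pixels where prediction equals ground truth: [0, 1, 1]
area_intersect = np.histogram(matched, bins=num_labels, range=(0, num_labels - 1))[0]  # [1, 2]
area_pred = np.histogram(pred, bins=num_labels, range=(0, num_labels - 1))[0]          # [1, 3]
area_gt = np.histogram(gt, bins=num_labels, range=(0, num_labels - 1))[0]              # [2, 2]
iou = area_intersect / (area_pred + area_gt - area_intersect)  # per-class IoU: [0.5, 0.667]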
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_layoutlmv2_fast"""] = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_layoutlmv2"""] = ["""LayoutLMv2FeatureExtractor"""]
    _import_structure["""image_processing_layoutlmv2"""] = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_layoutlmv2"""] = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 370 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_gpt_neox_fast"""] = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neox"""] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 291 | 0 |
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number , int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128 |
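The loop above shifts right until the number is exhausted, so the result equals int.bit_length(), i.e. the 1-indexed position of the highest set bit; a quick cross-check:
for n in (1, 2, 25, 32, 1 << 20):
    assert get_highest_set_bit_position(n) == n.bit_length()
assert get_highest_set_bit_position(0) == 0  # zero has no set bits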
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        '''simple docstring'''
        pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , "set_timesteps" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
    def test_inference_steps( self ):
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2540529 ) < 10
| 128 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''megatron-bert'''
    def __init__( self , vocab_size=29_056 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 364 |
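Hedged usage sketch (the class as reconstructed above; the archive map is empty, so values here are illustrative): PretrainedConfig subclasses accept keyword overrides and round-trip through to_dict().
config = MegatronBertConfig(hidden_size=512, num_hidden_layers=8)  # illustrative overrides
assert config.to_dict()["hidden_size"] == 512
assert config.model_type == "megatron-bert"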
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_trocr"""] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 231 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests( unittest.TestCase ):
    def test_cached_file( self ):
        """simple docstring"""
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR , subfolder)))
        with open(os.path.join(CACHE_DIR , """refs""" , """main""")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , """snapshots""" , main_commit , CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT , CONFIG_NAME)
        self.assertEqual(archive_file , new_archive_file)
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME , revision="""9b8c223""")
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , """snapshots""" , FULL_COMMIT_HASH , CONFIG_NAME))
    def test_cached_file_errors( self ):
        """simple docstring"""
        with self.assertRaisesRegex(EnvironmentError , """is not a valid model identifier"""):
            _ = cached_file("""tiny-random-bert""" , CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError , """is not a valid git identifier"""):
            _ = cached_file(RANDOM_BERT , CONFIG_NAME , revision="""aaaa""")
        with self.assertRaisesRegex(EnvironmentError , """does not appear to have a file named"""):
            _ = cached_file(RANDOM_BERT , """conf""")
    def test_non_existence_is_cached( self ):
        """simple docstring"""
        with self.assertRaisesRegex(EnvironmentError , """does not appear to have a file named"""):
            _ = cached_file(RANDOM_BERT , """conf""")
        with open(os.path.join(CACHE_DIR , """refs""" , """main""")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR , """.no_exist""" , main_commit , """conf""")))
        path = cached_file(RANDOM_BERT , """conf""" , _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        path = cached_file(RANDOM_BERT , """conf""" , local_files_only=True , _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""" , return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT , """conf""" , _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # Check that we did call the fake head request
            mock_head.assert_called()
    def test_has_file( self ):
        """simple docstring"""
        self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , WEIGHTS_NAME))
        self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , FLAX_WEIGHTS_NAME))
    def test_get_file_from_repo_distant( self ):
        """simple docstring"""
        self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt"""))
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError , """is not a valid model identifier"""):
            get_file_from_repo("""bert-base-case""" , CONFIG_NAME)
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError , """is not a valid git identifier"""):
            get_file_from_repo("""bert-base-cased""" , CONFIG_NAME , revision="""ahaha""")
        resolved_file = get_file_from_repo("""bert-base-cased""" , CONFIG_NAME)
        # The name is the cached name, which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file , """r""").read())
        self.assertEqual(config["""hidden_size"""] , 7_6_8)
    def test_get_file_from_repo_local( self ):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / """a.txt"""
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir , """a.txt""") , str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir , """b.txt"""))
| 136 |
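# A direct usage sketch of the two helpers exercised by the tests above
# (requires network access on the first call):
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, has_file

# Resolve (downloading if needed) config.json for a tiny test repo; the
# returned path points inside the local cache's snapshots/ folder.
resolved = cached_file("hf-internal-testing/tiny-random-bert", CONFIG_NAME)
print(resolved)

# has_file answers "does this repo contain this file?" without downloading it.
print(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))  # True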
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1_024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )
        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        # Number of audio samples per chunk, or None when processing whole inputs.
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        # Hop between consecutive chunks, derived from the overlap ratio.
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 136 | 1 |
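# Worked example of the derived properties above (values computed from the
# formulas in chunk_length, chunk_stride and frame_rate; the configuration
# values are illustrative, not a published checkpoint's):
from transformers import EncodecConfig

config = EncodecConfig(sampling_rate=48_000, chunk_length_s=1.0, overlap=0.01)
print(config.chunk_length)  # 48_000 samples per chunk
print(config.chunk_stride)  # max(1, int(0.99 * 48_000)) = 47_520
print(config.frame_rate)    # ceil(48_000 / (8 * 5 * 4 * 2)) = 150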
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` hits in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
| 42 |
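# Hand check of the printed example: P(X = 2) for n = 4, p = 0.75 is
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.
from math import comb

assert comb(4, 2) * 0.75**2 * 0.25**2 == 0.2109375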
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # 0 * log(0) would otherwise produce NaN
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) in order of increasing importance until the score drops below a threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percent)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually prune (remove) the masked heads and measure the effect on score and speed."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)

    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)",
        original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percent", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models")
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.")
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path")
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path")
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory")
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers")
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1")
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask heads until a threshold of accuracy.")
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).")
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Fraction of heads to mask at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 42 | 1 |
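# Toy check of the entropy helper used above: with unlogit=False it computes
# -(p * log p).sum(-1), and exact zeros are masked so that 0 * log(0)
# contributes 0 instead of NaN. This mirrors the helper; it is a sketch, not
# a call into the script itself.
import torch

p = torch.tensor([[0.5, 0.5, 0.0, 0.0]])
plogp = p * torch.log(p)   # zeros produce NaN here (0 * -inf)...
plogp[p == 0] = 0.0        # ...so they are zeroed out explicitly
print(-plogp.sum(dim=-1))  # tensor([0.6931]) == log(2)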
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
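# Usage sketch: the same conversion can be driven without argparse by calling
# the function defined above directly. All paths below are placeholders; this
# only runs against a real Funnel TF checkpoint.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="funnel/model.ckpt",           # placeholder
    config_file="funnel/config.json",                 # placeholder
    pytorch_dump_path="funnel-pt/pytorch_model.bin",  # placeholder
    base_model=False,                                 # keep the decoder blocks
)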
| 77 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in blocks of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size,
                overwrite_cache=args.overwrite_cache, cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 77 | 1 |
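# Usage sketch: the three dataclasses above are normally filled from the
# command line; the same parsing can be exercised programmatically. All paths
# and the model choice are placeholders. Equivalent CLI:
#   python run_language_modeling.py --model_name_or_path distilroberta-base \
#       --train_data_file train.txt --mlm --do_train --output_dir out
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses(
    args=[
        "--model_name_or_path", "distilroberta-base",  # placeholder model
        "--train_data_file", "train.txt",              # placeholder file
        "--mlm",
        "--do_train",
        "--output_dir", "out",
    ]
)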
"""simple docstring"""
__A : Dict = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__A : str = [{"type": "code", "content": INSTALL_CONTENT}]
__A : Optional[int] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 369 |
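# A minimal sketch of how such a replacement map is typically applied when doc
# sources are rendered into notebooks; the exact consumer lives in the
# doc-building tooling, not shown here, and `doc_text` is a stand-in input.
doc_text = "Use {processor_class} together with {model_class}."
for placeholder, value in black_avoid_patterns.items():
    doc_text = doc_text.replace(placeholder, value)
print(doc_text)  # Use FakeProcessorClass together with FakeModelClass.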
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess: by default, log calls
    are executed on the main process only.
    """

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """
    Returns a `logging.Logger` for `name` that can handle multiprocessing.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
| 326 | 0 |
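# Usage sketch for the adapter above; `get_logger` is the public entry point.
# An Accelerator (or PartialState) must exist first, per the RuntimeError
# guard in `log`.
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()   # initializes PartialState
logger = get_logger(__name__)
logger.info("Model loaded")   # emitted on the main process only (default)
logger.info("Rank-local status", main_process_only=False, in_order=True)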