ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish national ID (DNI): 8 digits plus a check letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    # The check letter is fully determined by the numeric part modulo 23.
    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
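# Usage sketch ("12345678Z" is the textbook valid DNI: 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z"); dashes are stripped before validation:
#
#   is_spain_national_id("12345678Z")   # True
#   is_spain_national_id("12345678-Z")  # True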
def merge_sort(collection: list) -> list:
    """Pure-Python merge sort: recursively split, then merge sorted halves."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
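# Quick sanity checks (no dependencies beyond the function above):
#
#   merge_sort([0, 5, 3, 2, 2])  # [0, 2, 2, 3, 5]
#   merge_sort([])               # []
#   merge_sort([-2, -5, -45])    # [-45, -5, -2]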
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    """Create a beta schedule that discretizes the given alpha_bar function over time."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        """Scale the denoising model input by `1 / sqrt(sigma^2 + 1)`."""
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
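# A minimal sketch of driving this two-step sampler (`model` is a placeholder
# for any noise-prediction network; the names and values below are
# illustrative, not part of this module):
#
#   scheduler = KDPM2DiscreteScheduler(beta_start=0.00085, beta_end=0.012)
#   scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)  # hypothetical denoiser call
#       sample = scheduler.step(noise_pred, t, sample).prev_sample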
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-launches this very file under torchrun, so the `__main__` block
        # below is what actually runs on each GPU rank.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable

import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm

from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule


class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an extra classification head for the answer category."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3


@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask


def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics


class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")


def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # decay everything except biases and LayerNorm scales
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
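# Wiring sketch (the step count is illustrative; `from_pretrained` is inherited
# from the Flax base model):
#
#   args = Args()
#   model = FlaxBigBirdForNaturalQuestions.from_pretrained(args.model_id)
#   tx, lr = build_tx(args.lr, args.init_lr, args.warmup_steps,
#                     num_train_steps=10_000, weight_decay=args.weight_decay)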
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
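# Usage sketch (the checkpoint id is an assumption; any OWL-ViT checkpoint
# shipping this processor layout behaves the same way):
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")
#   # -> BatchEncoding with "input_ids", "attention_mask" and "pixel_values"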
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
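# Encoding example (values illustrative): multiple translations for one
# language are split into parallel tuples, sorted by language code:
#
#   TranslationVariableLanguages(languages=["en", "fr"]).encode_example(
#       {"en": "the cat", "fr": ["le chat", "la chatte"]}
#   )
#   # -> {"language": ("en", "fr", "fr"),
#   #     "translation": ("the cat", "le chat", "la chatte")}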
import math


def proth(number: int) -> int:
    """Return the n-th Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # +1 for binary starting at 0 (2^0, 2^1, ...), +1 to start at the 3rd Proth number
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find any instance where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        r"""Find any instance where a script contains a `print` statement."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores each query token as entity start/end using support examples."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Compute start/end probabilities for each query against its support set."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
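# Shape sketch (inferred from how `forward` indexes its inputs; treat this as
# an assumption, not a documented contract):
#   W_query["input_ids"]:    (num_queries, seq_len)
#   W_supports["input_ids"]: (total_support_examples, seq_len), plus the
#   bookkeeping entries "sizes", "start_token_id" and "end_token_id"
#   that `forward` consumes before running BERT.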
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
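# Validation sketch (values are illustrative):
#
#   config = LlamaConfig()                                   # all defaults
#   config.rope_scaling = {"type": "linear", "factor": 2.0}  # well-formed dict
#   config._rope_scaling_validation()                        # passes silently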
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name


def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img():
    # We verify the converted model on an image of two cats from COCO
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
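# Example invocation (the script filename and local paths are placeholders):
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path /path/to/dump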
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`].
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
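# Composition sketch (the two sub-configs stand in for any question-encoder /
# generator pair, e.g. a DPR encoder and a BART generator):
#
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       question_encoder_config, generator_config, n_docs=5
#   )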
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen placed at (row, column) is not attacked."""
    for i in range(len(board)):  # same row
        if board[row][i] == 1:
            return False
    for i in range(len(board)):  # same column
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):  # upper-left diagonal
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):  # upper-right diagonal
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    """Place one queen per row, backtracking on conflicts."""
    if row >= len(board):
        solution.append([line[:] for line in board])  # store a copy, not the mutable board
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0  # backtrack
    return False
def printboard(board: list[list[int]]) -> None:
    """Print 'Q' for a queen and '.' for an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()
# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
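# Editor's sanity check (a sketch that assumes the helpers above): the classic
# solution counts are 2 for a 4x4 board and 92 for the 8x8 board printed above.
solution.clear()
solve([[0] * 4 for _ in range(4)], 0)  # prints each 4x4 solution as a side effect
assert len(solution) == 2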
| 310
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __lowerCamelCase (_a ):
_lowercase = """M-CLIP"""
def __init__( self: int,A_: Any=1024,A_: Union[str, Any]=768,**A_: str ):
'''simple docstring'''
__UpperCamelCase = transformerDimSize
__UpperCamelCase = imageDimSize
super().__init__(**A_ )
class __lowerCamelCase (_a ):
_lowercase = MCLIPConfig
def __init__( self: int,A_: Optional[Any],*A_: List[str],**A_: Union[str, Any] ):
'''simple docstring'''
super().__init__(A_,*A_,**A_ )
__UpperCamelCase = XLMRobertaModel(A_ )
__UpperCamelCase = torch.nn.Linear(
in_features=config.transformerDimensions,out_features=config.numDims )
def snake_case_ ( self: Dict,A_: int,A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = self.transformer(input_ids=A_,attention_mask=A_ )[0]
__UpperCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(A_ ), embs
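# Editor's sketch of the masked mean-pooling step in the forward pass above,
# on toy tensors (shapes assumed: embeddings [batch, seq, dim], mask [batch, seq]).
embs = torch.ones(2, 4, 3)
mask = torch.tensor([[1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]])
pooled = (embs * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]
assert pooled.shape == (2, 3)  # one pooled vector per sequence, padding ignored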
| 310
| 1
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__snake_case = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case = json.load(f)
@require_torch
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int,A_: int ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(A_ )
def snake_case_ ( self: Dict,A_: int ):
'''simple docstring'''
__UpperCamelCase = FSMTForConditionalGeneration.from_pretrained(A_ ).to(A_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 2_6.0],
['ru-en', 2_2.0],
['en-de', 2_2.0],
['de-en', 2_9.0],
] )
@slow
def snake_case_ ( self: Tuple,A_: Any,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = F'''facebook/wmt19-{pair}'''
__UpperCamelCase = self.get_tokenizer(A_ )
__UpperCamelCase = self.get_model(A_ )
__UpperCamelCase = bleu_data[pair]['src']
__UpperCamelCase = bleu_data[pair]['tgt']
__UpperCamelCase = tokenizer(A_,return_tensors='pt',truncation=A_,padding='longest' ).to(A_ )
__UpperCamelCase = model.generate(
input_ids=batch.input_ids,num_beams=8,)
__UpperCamelCase = tokenizer.batch_decode(
A_,skip_special_tokens=A_,clean_up_tokenization_spaces=A_ )
__UpperCamelCase = calculate_bleu(A_,A_ )
print(A_ )
self.assertGreaterEqual(scores['bleu'],A_ )
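# Editor's sketch of the imported helper (an assumption about the local
# examples utils, not verified against this checkout): `calculate_bleu` is
# typically a thin wrapper around sacrebleu's corpus_bleu returning {'bleu': score}.
# from sacrebleu import corpus_bleu
# def calculate_bleu(output_lns, refs_lns, **kwargs):
#     return {'bleu': round(corpus_bleu(output_lns, [refs_lns], **kwargs).score, 4)}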
| 310
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __lowerCamelCase :
_lowercase = XGLMConfig
_lowercase = {}
_lowercase = """gelu"""
def __init__( self: Optional[int],A_: Dict,A_: Any=14,A_: Optional[int]=7,A_: str=True,A_: Any=True,A_: Optional[int]=True,A_: Optional[int]=99,A_: List[str]=32,A_: Any=2,A_: Tuple=4,A_: List[str]=37,A_: Dict="gelu",A_: int=0.1,A_: List[str]=0.1,A_: int=512,A_: List[Any]=0.0_2,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = ffn_dim
__UpperCamelCase = activation_function
__UpperCamelCase = activation_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = 2
__UpperCamelCase = 1
def snake_case_ ( self: Dict ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = self.get_config()
__UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=A_,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=A_,)
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
), (
__UpperCamelCase
), (
__UpperCamelCase
), (
__UpperCamelCase
),
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class __lowerCamelCase (_a , _a , unittest.TestCase ):
_lowercase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_lowercase = (TFXGLMForCausalLM,) if is_tf_available() else ()
_lowercase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,n_embd=37 )
def snake_case_ ( self: Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def snake_case_ ( self: Any ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __lowerCamelCase (unittest.TestCase ):
@slow
def snake_case_ ( self: Optional[Any],A_: int=True ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = tf.convert_to_tensor([[2, 268, 9865]],dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase = model.generate(A_,do_sample=A_,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(),A_ )
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase = tokenizer('Today is a nice day and',return_tensors='tf' )
__UpperCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase = model.generate(A_,do_sample=A_,seed=[7, 0] )
__UpperCamelCase = tokenizer.decode(output_ids[0],skip_special_tokens=A_ )
__UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(A_,A_ )
@slow
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = 'left'
# use different length sentences to test batching
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase = tokenizer(A_,return_tensors='tf',padding=A_ )
__UpperCamelCase = inputs['input_ids']
__UpperCamelCase = model.generate(input_ids=A_,attention_mask=inputs['attention_mask'],max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[0],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[1],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_non_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(A_,A_ )
self.assertListEqual(A_,[non_padded_sentence, padded_sentence] )
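# Editor's note (a sketch, not part of the test): decoder-only models must be
# padded on the left for batched generation, otherwise pad tokens sit between
# the prompt and its continuation; the minimal pattern the test exercises is:
# tokenizer.padding_side = 'left'
# inputs = tokenizer(sentences, return_tensors='tf', padding=True)
# model.generate(inputs['input_ids'], attention_mask=inputs['attention_mask'])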
| 310
| 1
|
import base64
def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string to Base64 bytes."""
    return base64.b64encode(string.encode('utf-8'))
def base64_decode(encoded: bytes) -> str:
    """Decode Base64 bytes back to a UTF-8 string."""
    return base64.b64decode(encoded).decode('utf-8')
if __name__ == "__main__":
    test = '''Hello World!'''
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
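    # Round-trip property (editor's sketch): decoding an encoding returns the
    # original string for any UTF-8 input.
    assert base64_decode(encoded) == test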
| 310
| 1
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__snake_case = logging.getLogger(__name__)
class __lowerCamelCase (_a ):
_lowercase = """sequence-classification"""
def __init__( self: Dict,A_: Tuple ):
'''simple docstring'''
if type(A_ ) == dict:
__UpperCamelCase = Namespace(**A_ )
__UpperCamelCase = glue_output_modes[hparams.task]
__UpperCamelCase = glue_tasks_num_labels[hparams.task]
super().__init__(A_,A_,self.mode )
def snake_case_ ( self: Optional[Any],**A_: int ):
'''simple docstring'''
return self.model(**A_ )
def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: List[str] ):
'''simple docstring'''
__UpperCamelCase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__UpperCamelCase = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
__UpperCamelCase = self(**A_ )
__UpperCamelCase = outputs[0]
__UpperCamelCase = self.trainer.lr_schedulers[0]['scheduler']
__UpperCamelCase = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = self.hparams
__UpperCamelCase = processors[args.task]()
__UpperCamelCase = processor.get_labels()
for mode in ["train", "dev"]:
__UpperCamelCase = self._feature_file(A_ )
if os.path.exists(A_ ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s',A_ )
else:
logger.info('Creating features from dataset file at %s',args.data_dir )
__UpperCamelCase = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
__UpperCamelCase = convert_examples_to_features(
A_,self.tokenizer,max_length=args.max_seq_length,label_list=self.labels,output_mode=args.glue_output_mode,)
logger.info('Saving features into cached file %s',A_ )
torch.save(A_,A_ )
def snake_case_ ( self: int,A_: str,A_: int,A_: bool = False ):
'''simple docstring'''
__UpperCamelCase = 'dev' if mode == 'test' else mode
__UpperCamelCase = self._feature_file(A_ )
logger.info('Loading features from cached file %s',A_ )
__UpperCamelCase = torch.load(A_ )
__UpperCamelCase = torch.tensor([f.input_ids for f in features],dtype=torch.long )
__UpperCamelCase = torch.tensor([f.attention_mask for f in features],dtype=torch.long )
__UpperCamelCase = torch.tensor([f.token_type_ids for f in features],dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__UpperCamelCase = torch.tensor([f.label for f in features],dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__UpperCamelCase = torch.tensor([f.label for f in features],dtype=torch.float )
return DataLoader(
TensorDataset(A_,A_,A_,A_ ),batch_size=A_,shuffle=A_,)
def snake_case_ ( self: Union[str, Any],A_: List[Any],A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__UpperCamelCase = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
__UpperCamelCase = self(**A_ )
__UpperCamelCase, __UpperCamelCase = outputs[:2]
__UpperCamelCase = logits.detach().cpu().numpy()
__UpperCamelCase = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def snake_case_ ( self: str,A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
__UpperCamelCase = np.concatenate([x['pred'] for x in outputs],axis=0 )
if self.hparams.glue_output_mode == "classification":
__UpperCamelCase = np.argmax(A_,axis=1 )
elif self.hparams.glue_output_mode == "regression":
__UpperCamelCase = np.squeeze(A_ )
__UpperCamelCase = np.concatenate([x['target'] for x in outputs],axis=0 )
__UpperCamelCase = [[] for _ in range(out_label_ids.shape[0] )]
__UpperCamelCase = [[] for _ in range(out_label_ids.shape[0] )]
__UpperCamelCase = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task,A_,A_ )}
__UpperCamelCase = dict(results.items() )
__UpperCamelCase = results
return ret, preds_list, out_label_list
def snake_case_ ( self: Dict,A_: list ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = self._eval_end(A_ )
__UpperCamelCase = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def snake_case_ ( self: Dict,A_: Dict ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = self._eval_end(A_ )
__UpperCamelCase = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def snake_case_ ( A_: Union[str, Any],A_: Any ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(A_,A_ )
parser.add_argument(
'--max_seq_length',default=128,type=A_,help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
),)
parser.add_argument(
'--task',default='',type=A_,required=A_,help='The GLUE task to run',)
parser.add_argument(
'--gpus',default=0,type=A_,help='The number of GPUs allocated for this, it is by default 0 meaning none',)
parser.add_argument(
'--overwrite_cache',action='store_true',help='Overwrite the cached training and evaluation sets' )
return parser
def _A ( ) -> int:
"""simple docstring"""
__UpperCamelCase = argparse.ArgumentParser()
add_generic_args(_lowercase , os.getcwd() )
__UpperCamelCase = GLUETransformer.add_model_specific_args(_lowercase , os.getcwd() )
__UpperCamelCase = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__UpperCamelCase = os.path.join(
'./results' , f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
__UpperCamelCase = GLUETransformer(_lowercase )
__UpperCamelCase = generic_train(_lowercase , _lowercase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=_lowercase ) )
__UpperCamelCase = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_lowercase )
if __name__ == "__main__":
main()
| 310
|
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, n * (2n - 1) for n = 0 .. length - 1."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
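    # Closed-form check (editor's sketch): h(n) = n * (2n - 1), so the
    # sequence starts 0, 1, 6, 15, 28, ...
    assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]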
| 310
| 1
|
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the normal probability density function at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
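    # Spot check (editor's sketch): the density peaks at the mean, where it
    # equals 1 / sqrt(2 * pi * sigma**2); ~0.3989 for the standard normal.
    assert abs(gaussian(0.0) - 1 / sqrt(2 * pi)) < 1e-12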
| 310
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = MgpstrTokenizer
_lowercase = False
_lowercase = {}
_lowercase = False
def snake_case_ ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
__UpperCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
def snake_case_ ( self: Dict,**A_: Tuple ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname,**A_ )
def snake_case_ ( self: List[Any],A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'tester'
__UpperCamelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
__UpperCamelCase = tokenizer.encode([special_token],add_special_tokens=A_ )
self.assertEqual(len(A_ ),1 )
__UpperCamelCase = tokenizer.decode(A_,skip_special_tokens=A_ )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase, __UpperCamelCase = self.get_input_output_texts(A_ )
__UpperCamelCase = tokenizer.tokenize(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertNotEqual(len(A_ ),0 )
__UpperCamelCase = tokenizer.decode(A_ )
self.assertIsInstance(A_,A_ )
self.assertEqual(text_a.replace(' ','' ),A_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
pass
| 310
| 1
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = {
'7z': (seven_zip_file, SevenZipExtractor),
'bz2': (bza_file, BzipaExtractor),
'gzip': (gz_file, GzipExtractor),
'lz4': (lza_file, LzaExtractor),
'tar': (tar_file, TarExtractor),
'xz': (xz_file, XzExtractor),
'zip': (zip_file, ZipExtractor),
'zstd': (zstd_file, ZstdExtractor),
}
__UpperCamelCase, __UpperCamelCase = input_paths_and_base_extractors[compression_format]
if input_path is None:
__UpperCamelCase = f'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_lowercase )
assert base_extractor.is_extractable(_lowercase )
__UpperCamelCase = tmp_path / ('extracted' if is_archive else 'extracted.txt')
base_extractor.extract(_lowercase , _lowercase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__UpperCamelCase = file_path.read_text(encoding='utf-8' )
else:
__UpperCamelCase = output_path.read_text(encoding='utf-8' )
__UpperCamelCase = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Dict:
"""simple docstring"""
__UpperCamelCase = {
'7z': seven_zip_file,
'bz2': bza_file,
'gzip': gz_file,
'lz4': lza_file,
'tar': tar_file,
'xz': xz_file,
'zip': zip_file,
'zstd': zstd_file,
}
__UpperCamelCase = input_paths[compression_format]
if input_path is None:
__UpperCamelCase = f'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_lowercase )
__UpperCamelCase = Extractor.infer_extractor_format(_lowercase )
assert extractor_format is not None
__UpperCamelCase = tmp_path / ('extracted' if is_archive else 'extracted.txt')
Extractor.extract(_lowercase , _lowercase , _lowercase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__UpperCamelCase = file_path.read_text(encoding='utf-8' )
else:
__UpperCamelCase = output_path.read_text(encoding='utf-8' )
__UpperCamelCase = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
import tarfile
__UpperCamelCase = tmp_path / 'data_dot_dot'
directory.mkdir()
__UpperCamelCase = directory / 'tar_file_with_dot_dot.tar'
with tarfile.TarFile(_lowercase , 'w' ) as f:
f.add(_lowercase , arcname=os.path.join('..' , text_file.name ) )
return path
@pytest.fixture
def _A ( _lowercase ) -> str:
"""simple docstring"""
import tarfile
__UpperCamelCase = tmp_path / 'data_sym_link'
directory.mkdir()
__UpperCamelCase = directory / 'tar_file_with_sym_link.tar'
os.symlink('..' , directory / 'subdir' , target_is_directory=_lowercase )
with tarfile.TarFile(_lowercase , 'w' ) as f:
f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , )
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = {
'tar_file_with_dot_dot': tar_file_with_dot_dot,
'tar_file_with_sym_link': tar_file_with_sym_link,
}
__UpperCamelCase = insecure_tar_files[insecure_tar_file]
__UpperCamelCase = tmp_path / 'extracted'
TarExtractor.extract(_lowercase , _lowercase )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def _A ( _lowercase ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = tmpdir / 'not_a_zip_file'
# From: https://github.com/python/cpython/pull/5053
__UpperCamelCase = (
B'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
B'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
B'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
B'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
)
with not_a_zip_file.open('wb' ) as f:
f.write(_lowercase )
assert zipfile.is_zipfile(str(_lowercase ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(_lowercase ) # but we're right
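# Editor's sketch (illustrative names, not the `datasets` API): the insecure
# tar tests above hinge on a containment check like this one -- a member is
# only safe to extract when its resolved destination stays inside the
# extraction root.
def is_within_directory(directory, target):
    abs_directory = os.path.realpath(directory)
    abs_target = os.path.realpath(os.path.join(directory, target))
    return os.path.commonpath([abs_directory, abs_target]) == abs_directory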
| 310
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Union[str, Any],A_: List[str] ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = RobertaEmbeddings(A_ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Any,A_: int ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = config.num_labels
__UpperCamelCase = config.num_hidden_layers
__UpperCamelCase = DeeRobertaModel(A_ )
__UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase = nn.Linear(config.hidden_size,self.config.num_labels )
@add_start_docstrings_to_model_forward(A_ )
def snake_case_ ( self: List[str],A_: int=None,A_: List[Any]=None,A_: List[str]=None,A_: List[str]=None,A_: Optional[int]=None,A_: List[str]=None,A_: Any=None,A_: List[Any]=-1,A_: List[Any]=False,):
'''simple docstring'''
__UpperCamelCase = self.num_layers
try:
__UpperCamelCase = self.roberta(
A_,attention_mask=A_,token_type_ids=A_,position_ids=A_,head_mask=A_,inputs_embeds=A_,)
__UpperCamelCase = outputs[1]
__UpperCamelCase = self.dropout(A_ )
__UpperCamelCase = self.classifier(A_ )
__UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase = e.message
__UpperCamelCase = e.exit_layer
__UpperCamelCase = outputs[0]
if not self.training:
__UpperCamelCase = entropy(A_ )
__UpperCamelCase = []
__UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) )
# work with highway exits
__UpperCamelCase = []
for highway_exit in outputs[-1]:
__UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(A_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) )
highway_losses.append(A_ )
if train_highway:
__UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase = (loss,) + outputs
if not self.training:
__UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
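# Editor's sketch of the confidence signal used above: `entropy` (imported
# from modeling_highway_bert) is assumed to be the Shannon entropy of the
# softmaxed logits; a low value at an early highway marks a confident exit.
def _entropy_sketch(logits):
    log_probs = nn.functional.log_softmax(logits, dim=-1)
    return -(log_probs.exp() * log_probs).sum(dim=-1)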
| 310
| 1
|
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the sorted mode(s) of input_list; an empty input yields []."""
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    y = max(counts)  # the maximum count in the input list
    # values whose count equals the maximum count are the modes
    return sorted({input_list[i] for i, value in enumerate(counts) if value == y})
if __name__ == "__main__":
    import doctest
    doctest.testmod()
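    # Editor's examples: a unique mode and a multimodal input.
    assert mode([2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2]) == [2]
    assert mode([1, 1, 2, 2]) == [1, 2]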
| 310
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
@staticmethod
def snake_case_ ( *A_: Optional[Any],**A_: Tuple ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __lowerCamelCase (unittest.TestCase ):
_lowercase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case_ ( self: Dict,A_: Optional[int],A_: Tuple,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = ObjectDetectionPipeline(model=A_,image_processor=A_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case_ ( self: int,A_: Any,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 )
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
import datasets
__UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' )
__UpperCamelCase = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__UpperCamelCase = object_detector(A_,threshold=0.0 )
self.assertEqual(len(A_ ),len(A_ ) )
for outputs in batch_outputs:
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@require_torch
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=0.0 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
],threshold=0.0,)
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
],)
@require_torch
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 0.9_9_8_5
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=A_ )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
@require_torch
@require_pytesseract
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 'Narsil/layoutlmv3-finetuned-funsd'
__UpperCamelCase = 0.9_9_9_3
__UpperCamelCase = pipeline('object-detection',model=A_,threshold=A_ )
__UpperCamelCase = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
],)
| 310
| 1
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase="pt" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = {'add_prefix_space': True} if isinstance(_lowercase , _lowercase ) and not line.startswith(' ' ) else {}
__UpperCamelCase = padding_side
return tokenizer(
[line] , max_length=_lowercase , padding='max_length' if pad_to_max_length else None , truncation=_lowercase , return_tensors=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
def _A ( _lowercase , _lowercase , _lowercase=None , ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = input_ids.ne(_lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCamelCase (_a ):
def __init__( self: List[str],A_: str,A_: List[str],A_: List[str],A_: List[str],A_: Tuple="train",A_: Any=None,A_: List[str]=None,A_: List[Any]=None,A_: int="",):
'''simple docstring'''
super().__init__()
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.source' )
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.target' )
__UpperCamelCase = self.get_char_lens(self.src_file )
__UpperCamelCase = max_source_length
__UpperCamelCase = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
__UpperCamelCase = tokenizer
__UpperCamelCase = prefix
if n_obs is not None:
__UpperCamelCase = self.src_lens[:n_obs]
__UpperCamelCase = src_lang
__UpperCamelCase = tgt_lang
def __len__( self: Optional[Any] ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self: int,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = index + 1 # linecache starts at 1
__UpperCamelCase = self.prefix + linecache.getline(str(self.src_file ),A_ ).rstrip('\n' )
__UpperCamelCase = linecache.getline(str(self.tgt_file ),A_ ).rstrip('\n' )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer,A_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__UpperCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer,A_ ) else self.tokenizer
)
__UpperCamelCase = self.tokenizer.generator if isinstance(self.tokenizer,A_ ) else self.tokenizer
__UpperCamelCase = encode_line(A_,A_,self.max_source_length,'right' )
__UpperCamelCase = encode_line(A_,A_,self.max_target_length,'right' )
__UpperCamelCase = source_inputs['input_ids'].squeeze()
__UpperCamelCase = target_inputs['input_ids'].squeeze()
__UpperCamelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case_ ( A_: List[Any] ):
'''simple docstring'''
return [len(A_ ) for x in Path(A_ ).open().readlines()]
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
__UpperCamelCase = torch.stack([x['input_ids'] for x in batch] )
__UpperCamelCase = torch.stack([x['attention_mask'] for x in batch] )
__UpperCamelCase = torch.stack([x['decoder_input_ids'] for x in batch] )
__UpperCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = trim_batch(A_,A_ )
__UpperCamelCase, __UpperCamelCase = trim_batch(A_,A_,attention_mask=A_ )
__UpperCamelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
__snake_case = getLogger(__name__)
def _A ( _lowercase ) -> Any:
"""simple docstring"""
return list(itertools.chain.from_iterable(_lowercase ) )
def _A ( _lowercase ) -> None:
"""simple docstring"""
__UpperCamelCase = get_git_info()
save_json(_lowercase , os.path.join(_lowercase , 'git_log.json' ) )
def _A ( _lowercase , _lowercase , _lowercase=4 , **_lowercase ) -> List[Any]:
"""simple docstring"""
with open(_lowercase , 'w' ) as f:
json.dump(_lowercase , _lowercase , indent=_lowercase , **_lowercase )
def _A ( _lowercase ) -> Union[str, Any]:
"""simple docstring"""
with open(_lowercase ) as f:
return json.load(_lowercase )
def _A ( ) -> Dict:
"""simple docstring"""
__UpperCamelCase = git.Repo(search_parent_directories=_lowercase )
__UpperCamelCase = {
'repo_id': str(_lowercase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def _A ( _lowercase , _lowercase ) -> List:
"""simple docstring"""
return list(map(_lowercase , _lowercase ) )
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
with open(_lowercase , 'wb' ) as f:
return pickle.dump(_lowercase , _lowercase )
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
def remove_articles(_lowercase ):
return re.sub(r'\b(a|an|the)\b' , ' ' , _lowercase )
def white_space_fix(_lowercase ):
return " ".join(text.split() )
def remove_punc(_lowercase ):
__UpperCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowercase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowercase ) ) ) )
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = normalize_answer(_lowercase ).split()
__UpperCamelCase = normalize_answer(_lowercase ).split()
__UpperCamelCase = Counter(_lowercase ) & Counter(_lowercase )
__UpperCamelCase = sum(common.values() )
if num_same == 0:
return 0
__UpperCamelCase = 1.0 * num_same / len(_lowercase )
__UpperCamelCase = 1.0 * num_same / len(_lowercase )
__UpperCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
return normalize_answer(_lowercase ) == normalize_answer(_lowercase )
def _A ( _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
assert len(_lowercase ) == len(_lowercase )
__UpperCamelCase = 0
for hypo, pred in zip(_lowercase , _lowercase ):
em += exact_match_score(_lowercase , _lowercase )
if len(_lowercase ) > 0:
em /= len(_lowercase )
return {"em": em}
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
return model_prefix.startswith('rag' )
def _A ( _lowercase , _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
__UpperCamelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__UpperCamelCase = 'dropout_rate'
for p in extra_params:
if getattr(_lowercase , _lowercase , _lowercase ):
if not hasattr(_lowercase , _lowercase ) and not hasattr(_lowercase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_lowercase ) )
delattr(_lowercase , _lowercase )
continue
__UpperCamelCase = p if hasattr(_lowercase , _lowercase ) else equivalent_param[p]
setattr(_lowercase , _lowercase , getattr(_lowercase , _lowercase ) )
delattr(_lowercase , _lowercase )
return hparams, config
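# Editor's spot checks for the QA metrics above (assuming the helpers keep
# their upstream names: normalize_answer, exact_match_score, f1_score):
# normalization drops case, punctuation and articles before token-overlap F1.
assert normalize_answer('The Quick, Brown Fox!') == 'quick brown fox'
assert exact_match_score('the answer', 'Answer') is True
assert abs(f1_score('a quick brown fox', 'the quick fox') - 0.8) < 1e-9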
| 310
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class __lowerCamelCase (_a ):
_lowercase = """xlm-roberta"""
def __init__( self: Union[str, Any],A_: Union[str, Any]=3_0522,A_: Dict=768,A_: Union[str, Any]=12,A_: Any=12,A_: str=3072,A_: Union[str, Any]="gelu",A_: str=0.1,A_: Optional[int]=0.1,A_: List[Any]=512,A_: Optional[Any]=2,A_: Dict=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=1,A_: str=0,A_: str=2,A_: Optional[Any]="absolute",A_: Union[str, Any]=True,A_: int=None,**A_: Optional[Any],):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 310
| 1
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class __lowerCamelCase (_a ):
_lowercase = """xlnet"""
_lowercase = ["""mems"""]
_lowercase = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self: List[Any],A_: Optional[int]=3_2000,A_: List[Any]=1024,A_: List[str]=24,A_: str=16,A_: Optional[Any]=4096,A_: Union[str, Any]="gelu",A_: Dict=True,A_: Union[str, Any]="bi",A_: Union[str, Any]=0.0_2,A_: Optional[int]=1E-12,A_: Tuple=0.1,A_: int=512,A_: str=None,A_: Tuple=True,A_: Optional[Any]=False,A_: Any=False,A_: Tuple=-1,A_: Any=False,A_: str="last",A_: int=True,A_: Union[str, Any]="tanh",A_: List[str]=0.1,A_: List[Any]=5,A_: Union[str, Any]=5,A_: Union[str, Any]=5,A_: Optional[int]=1,A_: Union[str, Any]=2,**A_: Union[str, Any],):
'''simple docstring'''
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = n_layer
__UpperCamelCase = n_head
if d_model % n_head != 0:
raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
__UpperCamelCase = d_model // n_head
__UpperCamelCase = ff_activation
__UpperCamelCase = d_inner
__UpperCamelCase = untie_r
__UpperCamelCase = attn_type
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = dropout
__UpperCamelCase = mem_len
__UpperCamelCase = reuse_len
__UpperCamelCase = bi_data
__UpperCamelCase = clamp_len
__UpperCamelCase = same_length
__UpperCamelCase = summary_type
__UpperCamelCase = summary_use_proj
__UpperCamelCase = summary_activation
__UpperCamelCase = summary_last_dropout
__UpperCamelCase = start_n_top
__UpperCamelCase = end_n_top
__UpperCamelCase = bos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
' instead.',A_,)
__UpperCamelCase = kwargs['use_cache']
__UpperCamelCase = use_mems_eval
__UpperCamelCase = use_mems_train
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def snake_case_ ( self: int,A_: List[Any] ):
'''simple docstring'''
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 310
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase (_a ):
_lowercase = field(default=_a , metadata={"""help""": """Whether to use SortishSampler or not."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(A_,A_ ):
__UpperCamelCase = v.to_dict()
return d
| 310
| 1
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """OwlViTImageProcessor"""
_lowercase = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self: int,A_: Tuple=None,A_: int=None,**A_: int ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.',A_,)
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(A_,A_ )
def __call__( self: str,A_: Dict=None,A_: Optional[int]=None,A_: Any=None,A_: Tuple="max_length",A_: int="np",**A_: Optional[Any] ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(A_,A_ ) or (isinstance(A_,A_ ) and not isinstance(text[0],A_ )):
__UpperCamelCase = [self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )]
elif isinstance(A_,A_ ) and isinstance(text[0],A_ ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(A_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(A_ ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(A_ ))
__UpperCamelCase = self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )
encodings.append(A_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings],dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings],dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings],axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
A_,return_tensors=A_,**A_ ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: Optional[int],*A_: int,**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process(*A_,**A_ )
def snake_case_ ( self: str,*A_: Optional[int],**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*A_,**A_ )
def snake_case_ ( self: str,*A_: Tuple,**A_: int ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*A_,**A_ )
def snake_case_ ( self: List[str],*A_: str,**A_: List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: int,*A_: Any,**A_: Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',A_,)
return self.image_processor_class
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',A_,)
return self.image_processor
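# Hedged usage sketch for the processor above; the checkpoint name, image URL
# and query strings are illustrative assumptions, not taken from this file.
import requests
from PIL import Image
from transformers import OwlViTProcessor

owl_processor = OwlViTProcessor.from_pretrained('google/owlvit-base-patch32')
owl_image = Image.open(
    requests.get('http://images.cocodataset.org/val2017/000000039769.jpg', stream=True).raw
)
# Nested list of text queries: one inner list per image in the batch.
owl_inputs = owl_processor(
    text=[['a photo of a cat', 'a photo of a dog']], images=owl_image, return_tensors='pt'
)
print(owl_inputs['input_ids'].shape)     # (num_text_queries, sequence_length)
print(owl_inputs['pixel_values'].shape)  # (batch_size, num_channels, height, width)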
| 310
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _A ( _lowercase ) -> Dict:
"""simple docstring"""
if is_torch_version('<' , '2.0.0' ) or not hasattr(_lowercase , '_dynamo' ):
return False
return isinstance(_lowercase , torch._dynamo.eval_frame.OptimizedModule )
def _A ( _lowercase , _lowercase = True ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__UpperCamelCase = is_compiled_module(_lowercase )
if is_compiled:
__UpperCamelCase = model
__UpperCamelCase = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_lowercase , _lowercase ):
__UpperCamelCase = model.module
if not keep_fpaa_wrapper:
__UpperCamelCase = getattr(_lowercase , 'forward' )
__UpperCamelCase = model.__dict__.pop('_original_forward' , _lowercase )
if original_forward is not None:
while hasattr(_lowercase , '__wrapped__' ):
__UpperCamelCase = forward.__wrapped__
if forward == original_forward:
break
__UpperCamelCase = forward
if getattr(_lowercase , '_converted_to_transformer_engine' , _lowercase ):
convert_model(_lowercase , to_transformer_engine=_lowercase )
if is_compiled:
__UpperCamelCase = model
__UpperCamelCase = compiled_model
return model
def _A ( ) -> Any:
"""simple docstring"""
PartialState().wait_for_everyone()
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_lowercase , _lowercase )
elif PartialState().local_process_index == 0:
torch.save(_lowercase , _lowercase )
@contextmanager
def _A ( **_lowercase ) -> Union[str, Any]:
"""simple docstring"""
for key, value in kwargs.items():
__UpperCamelCase = str(_lowercase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _A ( _lowercase ) -> Tuple:
"""simple docstring"""
if not hasattr(_lowercase , '__qualname__' ) and not hasattr(_lowercase , '__name__' ):
__UpperCamelCase = getattr(_lowercase , '__class__' , _lowercase )
if hasattr(_lowercase , '__qualname__' ):
return obj.__qualname__
if hasattr(_lowercase , '__name__' ):
return obj.__name__
return str(_lowercase )
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
for key, value in source.items():
if isinstance(_lowercase , _lowercase ):
__UpperCamelCase = destination.setdefault(_lowercase , {} )
merge_dicts(_lowercase , _lowercase )
else:
__UpperCamelCase = value
return destination
def _A ( _lowercase = None ) -> bool:
"""simple docstring"""
if port is None:
__UpperCamelCase = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
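# Self-contained check of the recursive dict merge defined above (merge_dicts
# in accelerate), restated because the definition is renamed to _A here:
# nested keys from `source` are folded into `destination` without discarding
# siblings already present in the destination.
def merge_dicts_check(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            merge_dicts_check(value, destination.setdefault(key, {}))
        else:
            destination[key] = value
    return destination

merged = merge_dicts_check(
    {'optim': {'lr': 5e-4}},
    {'optim': {'lr': 1e-3, 'betas': (0.9, 0.999)}},
)
assert merged == {'optim': {'lr': 5e-4, 'betas': (0.9, 0.999)}}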
| 310
| 1
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__snake_case = logging.getLogger(__name__)
def _A ( _lowercase=2 , _lowercase=3 , _lowercase=16 , _lowercase = 10 , _lowercase = 2 ) -> Tuple:
"""simple docstring"""
def get_dataset(_lowercase ):
__UpperCamelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(_lowercase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__UpperCamelCase = get_dataset(_lowercase )
__UpperCamelCase = get_dataset(_lowercase )
__UpperCamelCase = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
__UpperCamelCase = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
return (train_dataloader, valid_dataloader)
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = []
for epoch in range(_lowercase ):
# Train quickly
model.train()
for batch in dataloader:
__UpperCamelCase, __UpperCamelCase = batch
__UpperCamelCase = model(_lowercase )
__UpperCamelCase = torch.nn.functional.mse_loss(_lowercase , _lowercase )
accelerator.backward(_lowercase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class __lowerCamelCase (nn.Module ):
def __init__( self: Optional[Any] ):
'''simple docstring'''
super().__init__()
__UpperCamelCase = nn.Parameter(torch.randn(1 ) )
__UpperCamelCase = nn.Parameter(torch.randn(1 ) )
def snake_case_ ( self: List[Any],A_: Tuple ):
'''simple docstring'''
return x * self.a + self.b
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters(),lr=1E-3 )
__UpperCamelCase, __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(total_limit=1,project_dir=A_,automatic_checkpoint_naming=A_ )
# Train baseline
__UpperCamelCase = Accelerator(project_config=A_ )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
A_,A_,A_,A_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ),1 )
def snake_case_ ( self: str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters(),lr=1E-3 )
__UpperCamelCase, __UpperCamelCase = dummy_dataloaders()
# Train baseline
__UpperCamelCase = Accelerator()
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
A_,A_,A_,A_ )
# Save initial
__UpperCamelCase = os.path.join(A_,'initial' )
accelerator.save_state(A_ )
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3,A_,A_,A_,A_ )
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters(),lr=1E-3 )
__UpperCamelCase, __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = Accelerator()
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
A_,A_,A_,A_ )
accelerator.load_state(A_ )
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
__UpperCamelCase = train(2,A_,A_,A_,A_ )
# Save everything
__UpperCamelCase = os.path.join(A_,'checkpoint' )
accelerator.save_state(A_ )
# Load everything back in and make sure all states work
accelerator.load_state(A_ )
test_rands += train(1,A_,A_,A_,A_ )
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters(),lr=1E-3 )
__UpperCamelCase, __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=A_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=A_,project_config=A_ )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
A_,A_,A_,A_ )
# Save initial
accelerator.save_state()
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3,A_,A_,A_,A_ )
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters(),lr=1E-3 )
__UpperCamelCase, __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(iteration=1,automatic_checkpoint_naming=A_ )
__UpperCamelCase = Accelerator(project_dir=A_,project_config=A_ )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
A_,A_,A_,A_ )
accelerator.load_state(os.path.join(A_,'checkpoints','checkpoint_0' ) )
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
__UpperCamelCase = train(2,A_,A_,A_,A_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A_,'checkpoints','checkpoint_1' ) )
test_rands += train(1,A_,A_,A_,A_ )
((__UpperCamelCase), (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
self.assertEqual(A_,A_ )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = torch.tensor([1, 2, 3] )
__UpperCamelCase = torch.tensor([2, 3, 4] )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(net.parameters() )
__UpperCamelCase = Accelerator()
with self.assertRaises(A_ ) as ve:
accelerator.register_for_checkpointing(A_,A_,A_,A_ )
__UpperCamelCase = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters(),lr=1E-3 )
__UpperCamelCase = torch.optim.lr_scheduler.StepLR(A_,step_size=1,gamma=0.9_9 )
__UpperCamelCase, __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=A_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=A_,project_config=A_ )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
A_,A_,A_,A_,A_ )
# Save initial
accelerator.save_state()
__UpperCamelCase = scheduler.state_dict()
train(3,A_,A_,A_,A_,A_ )
self.assertNotEqual(A_,scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A_,'checkpoints','checkpoint_0' ) )
self.assertEqual(A_,scheduler.state_dict() )
def snake_case_ ( self: Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=A_,total_limit=2 )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=A_,project_config=A_ )
__UpperCamelCase = accelerator.prepare(A_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A_,'checkpoints','checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(A_,'checkpoints','checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(A_,'checkpoints','checkpoint_10' ) ) )
@require_cuda
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_,env=os.environ.copy() )
if __name__ == "__main__":
__snake_case = '''/tmp/accelerate/state_checkpointing'''
__snake_case = DummyModel()
__snake_case = torch.optim.Adam(params=model.parameters(), lr=1e-3)
__snake_case = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
__snake_case , __snake_case = dummy_dataloaders()
__snake_case = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__snake_case = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__snake_case , __snake_case = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__snake_case = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
__snake_case = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
__snake_case = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
__snake_case = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
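# Minimal sketch of the save/load flow the tests above exercise; the
# checkpoint path is an illustrative assumption.
sketch_model = DummyModel()
sketch_optimizer = torch.optim.Adam(sketch_model.parameters(), lr=1e-3)
sketch_accelerator = Accelerator()
sketch_model, sketch_optimizer = sketch_accelerator.prepare(sketch_model, sketch_optimizer)
sketch_accelerator.save_state('/tmp/accelerate_sketch')  # writes model, optimizer and RNG states
sketch_accelerator.load_state('/tmp/accelerate_sketch')  # restores them in place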
| 310
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
_lowercase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
_lowercase = field(metadata={"""help""": """Should contain the data files for the task."""} )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _A ( ) -> str:
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _lowercase )
# Set seed
set_seed(training_args.seed )
try:
__UpperCamelCase = processors[data_args.task_name]()
__UpperCamelCase = processor.get_labels()
__UpperCamelCase = len(_lowercase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(_lowercase ) -> Dict:
__UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(_lowercase , p.label_ids )}
# Data collator
__UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(_lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
results.update(_lowercase )
return results
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
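# Hedged sketch of the argument-parsing pattern used in main() above; the
# argv values are illustrative, not taken from this script.
sketch_parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
sketch_model_args, sketch_data_args, sketch_training_args = sketch_parser.parse_args_into_dataclasses(
    args=[
        '--model_name_or_path', 'bert-base-uncased',
        '--task_name', 'swag',
        '--data_dir', '/tmp/swag',
        '--output_dir', '/tmp/out',
    ]
)
assert sketch_data_args.task_name == 'swag'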
| 310
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __lowerCamelCase (_a ):
_lowercase = """yolos"""
def __init__( self: Tuple,A_: Any=768,A_: Any=12,A_: str=12,A_: Optional[int]=3072,A_: Dict="gelu",A_: Optional[Any]=0.0,A_: int=0.0,A_: int=0.0_2,A_: str=1E-12,A_: List[Any]=[512, 864],A_: Optional[int]=16,A_: Tuple=3,A_: Optional[int]=True,A_: List[str]=100,A_: Dict=True,A_: Optional[int]=False,A_: Dict=1,A_: Any=5,A_: Optional[Any]=2,A_: Optional[Any]=5,A_: str=2,A_: Union[str, Any]=0.1,**A_: Tuple,):
'''simple docstring'''
super().__init__(**A_ )
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = qkv_bias
__UpperCamelCase = num_detection_tokens
__UpperCamelCase = use_mid_position_embeddings
__UpperCamelCase = auxiliary_loss
# Hungarian matcher
__UpperCamelCase = class_cost
__UpperCamelCase = bbox_cost
__UpperCamelCase = giou_cost
# Loss coefficients
__UpperCamelCase = bbox_loss_coefficient
__UpperCamelCase = giou_loss_coefficient
__UpperCamelCase = eos_coefficient
class __lowerCamelCase (_a ):
_lowercase = version.parse("""1.11""" )
@property
def snake_case_ ( self: Any ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
return 1E-4
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
return 12
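# Illustrative sketch: the config class above corresponds to YolosConfig, and
# the second class pins the ONNX export surface (opset 1.11 inputs, validation
# tolerance 1e-4, default opset 12). The keyword values below are assumptions.
from transformers import YolosConfig
sketch_config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
assert sketch_config.num_detection_tokens == 100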
| 310
|
import os
def _A ( ) -> int:
    """simple docstring"""
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        __UpperCamelCase = str(file.readlines()[0] )
        __UpperCamelCase = names.replace('"' , '' ).split(',' )
    names.sort()
    __UpperCamelCase = 0
    __UpperCamelCase = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        __UpperCamelCase = 0
    return total_score
if __name__ == "__main__":
print(solution())
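# Worked check of the scoring rule above: "COLIN" scores 3+15+12+9+14 = 53,
# and at (1-based) position 938 in the sorted list it contributes 938 * 53.
assert sum(ord(c) - 64 for c in 'COLIN') == 53
assert 938 * 53 == 49714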
| 310
| 1
|
UpperCAmelCase__ = 256
# Modulus to hash a string
UpperCAmelCase__ = 1000003
def _a ( a :str , a :str ) -> bool:
a = len(a )
a = len(a )
if p_len > t_len:
return False
a = 0
a = 0
a = 1
# Calculating the hash of pattern and substring of text
for i in range(a ):
a = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
a = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
a = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
a = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _a ( ) -> None:
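    # Test 1)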
a = '''abc1abc12'''
a = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
a = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(a , a ) and not rabin_karp(a , a )
# Test 2)
a = '''ABABX'''
a = '''ABABZABABYABABX'''
assert rabin_karp(a , a )
# Test 3)
a = '''AAAB'''
a = '''ABAAAAAB'''
assert rabin_karp(a , a )
# Test 4)
a = '''abcdabcy'''
a = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(a , a )
# Test 5)
a = '''Lü'''
a = '''Lüsai'''
assert rabin_karp(a , a )
a = '''Lue'''
assert not rabin_karp(a , a )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
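# Worked check of the rolling-hash update used above, with the same
# alphabet_size (256) and modulus (1000003), on a 2-character window: rolling
# the hash of "ab" forward by one character reproduces the hash of "bc".
h_ab = (ord('b') + ord('a') * 256) % 1000003
h_bc = (ord('c') + ord('b') * 256) % 1000003
rolled = ((h_ab - ord('a') * 256) * 256 + ord('c')) % 1000003
assert rolled == h_bc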
| 0
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase="pt" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = {'add_prefix_space': True} if isinstance(_lowercase , _lowercase ) and not line.startswith(' ' ) else {}
__UpperCamelCase = padding_side
return tokenizer(
[line] , max_length=_lowercase , padding='max_length' if pad_to_max_length else None , truncation=_lowercase , return_tensors=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
def _A ( _lowercase , _lowercase , _lowercase=None , ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = input_ids.ne(_lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCamelCase (_a ):
def __init__( self: List[str],A_: str,A_: List[str],A_: List[str],A_: List[str],A_: Tuple="train",A_: Any=None,A_: List[str]=None,A_: List[Any]=None,A_: int="",):
'''simple docstring'''
super().__init__()
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.source' )
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.target' )
__UpperCamelCase = self.get_char_lens(self.src_file )
__UpperCamelCase = max_source_length
__UpperCamelCase = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
__UpperCamelCase = tokenizer
__UpperCamelCase = prefix
if n_obs is not None:
__UpperCamelCase = self.src_lens[:n_obs]
__UpperCamelCase = src_lang
__UpperCamelCase = tgt_lang
def __len__( self: Optional[Any] ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self: int,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = index + 1 # linecache starts at 1
__UpperCamelCase = self.prefix + linecache.getline(str(self.src_file ),A_ ).rstrip('\n' )
__UpperCamelCase = linecache.getline(str(self.tgt_file ),A_ ).rstrip('\n' )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer,A_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__UpperCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer,A_ ) else self.tokenizer
)
__UpperCamelCase = self.tokenizer.generator if isinstance(self.tokenizer,A_ ) else self.tokenizer
__UpperCamelCase = encode_line(A_,A_,self.max_source_length,'right' )
__UpperCamelCase = encode_line(A_,A_,self.max_target_length,'right' )
__UpperCamelCase = source_inputs['input_ids'].squeeze()
__UpperCamelCase = target_inputs['input_ids'].squeeze()
__UpperCamelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case_ ( A_: List[Any] ):
'''simple docstring'''
return [len(A_ ) for x in Path(A_ ).open().readlines()]
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
__UpperCamelCase = torch.stack([x['input_ids'] for x in batch] )
__UpperCamelCase = torch.stack([x['attention_mask'] for x in batch] )
__UpperCamelCase = torch.stack([x['decoder_input_ids'] for x in batch] )
__UpperCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = trim_batch(A_,A_ )
__UpperCamelCase, __UpperCamelCase = trim_batch(A_,A_,attention_mask=A_ )
__UpperCamelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
__snake_case = getLogger(__name__)
def _A ( _lowercase ) -> Any:
"""simple docstring"""
return list(itertools.chain.from_iterable(_lowercase ) )
def _A ( _lowercase ) -> None:
"""simple docstring"""
__UpperCamelCase = get_git_info()
save_json(_lowercase , os.path.join(_lowercase , 'git_log.json' ) )
def _A ( _lowercase , _lowercase , _lowercase=4 , **_lowercase ) -> List[Any]:
"""simple docstring"""
with open(_lowercase , 'w' ) as f:
json.dump(_lowercase , _lowercase , indent=_lowercase , **_lowercase )
def _A ( _lowercase ) -> Union[str, Any]:
"""simple docstring"""
with open(_lowercase ) as f:
return json.load(_lowercase )
def _A ( ) -> Dict:
"""simple docstring"""
__UpperCamelCase = git.Repo(search_parent_directories=_lowercase )
__UpperCamelCase = {
'repo_id': str(_lowercase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def _A ( _lowercase , _lowercase ) -> List:
"""simple docstring"""
return list(map(_lowercase , _lowercase ) )
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
with open(_lowercase , 'wb' ) as f:
return pickle.dump(_lowercase , _lowercase )
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
def remove_articles(_lowercase ):
return re.sub(r'\b(a|an|the)\b' , ' ' , _lowercase )
def white_space_fix(_lowercase ):
return " ".join(text.split() )
def remove_punc(_lowercase ):
__UpperCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowercase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowercase ) ) ) )
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = normalize_answer(_lowercase ).split()
__UpperCamelCase = normalize_answer(_lowercase ).split()
__UpperCamelCase = Counter(_lowercase ) & Counter(_lowercase )
__UpperCamelCase = sum(common.values() )
if num_same == 0:
return 0
__UpperCamelCase = 1.0 * num_same / len(_lowercase )
__UpperCamelCase = 1.0 * num_same / len(_lowercase )
__UpperCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
return normalize_answer(_lowercase ) == normalize_answer(_lowercase )
def _A ( _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
assert len(_lowercase ) == len(_lowercase )
__UpperCamelCase = 0
for hypo, pred in zip(_lowercase , _lowercase ):
em += exact_match_score(_lowercase , _lowercase )
if len(_lowercase ) > 0:
em /= len(_lowercase )
return {"em": em}
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
return model_prefix.startswith('rag' )
def _A ( _lowercase , _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
__UpperCamelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__UpperCamelCase = 'dropout_rate'
for p in extra_params:
if getattr(_lowercase , _lowercase , _lowercase ):
if not hasattr(_lowercase , _lowercase ) and not hasattr(_lowercase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_lowercase ) )
delattr(_lowercase , _lowercase )
continue
__UpperCamelCase = p if hasattr(_lowercase , _lowercase ) else equivalent_param[p]
setattr(_lowercase , _lowercase , getattr(_lowercase , _lowercase ) )
delattr(_lowercase , _lowercase )
return hparams, config
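# Worked check of the token-level F1 computed above: with prediction tokens
# ['big', 'red', 'cat'] and gold tokens ['big', 'cat'], 2 tokens are shared,
# so precision = 2/3, recall = 1 and F1 = 0.8. (Articles like "the" would be
# stripped by normalize_answer before counting.)
pred_tokens, gold_tokens = ['big', 'red', 'cat'], ['big', 'cat']
common_count = 2
precision, recall = common_count / len(pred_tokens), common_count / len(gold_tokens)
fa_check = 2 * precision * recall / (precision + recall)
assert abs(fa_check - 0.8) < 1e-9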
| 310
| 0
|
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Optional[int] =list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
SCREAMING_SNAKE_CASE_: Optional[Any] =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __A :
a__ : str = field(
default=UpperCamelCase__ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(UpperCamelCase__ )} )
a__ : str = field(
default=UpperCamelCase__ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
a__ : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a__ : int = field(
default=128 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
a__ : int = field(
default=64 , metadata={
"""help""": (
"""The maximum number of tokens for the question. Questions longer than this will """
"""be truncated to this length."""
)
} , )
a__ : int = field(
default=30 , metadata={
"""help""": (
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
)
} , )
a__ : bool = field(
default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a__ : bool = field(
default=UpperCamelCase__ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
a__ : float = field(
default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a__ : int = field(
default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a__ : int = field(
default=0 , metadata={
"""help""": (
"""language id of input for language-specific xlm models (see"""
""" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
)
} , )
a__ : int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class __A ( UpperCamelCase__ ):
a__ : str = """train"""
a__ : List[Any] = """dev"""
class __A ( UpperCamelCase__ ):
a__ : SquadDataTrainingArguments
a__ : List[SquadFeatures]
a__ : Split
a__ : bool
def __init__(self : Optional[int] , __a : SquadDataTrainingArguments , __a : PreTrainedTokenizer , __a : Optional[int] = None , __a : Union[str, Split] = Split.train , __a : Optional[bool] = False , __a : Optional[str] = None , __a : Optional[str] = "pt" , ):
UpperCAmelCase_ = args
UpperCAmelCase_ = is_language_sensitive
        UpperCAmelCase_ = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__a , __a ):
try:
UpperCAmelCase_ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
UpperCAmelCase_ = mode
# Load data features from cache or dataset file
UpperCAmelCase_ = "v2" if args.version_2_with_negative else "v1"
UpperCAmelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase_ = cached_features_file + ".lock"
with FileLock(__a ):
if os.path.exists(__a ) and not args.overwrite_cache:
UpperCAmelCase_ = time.time()
UpperCAmelCase_ = torch.load(__a )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
UpperCAmelCase_ = self.old_features["features"]
UpperCAmelCase_ = self.old_features.get("dataset" , __a )
UpperCAmelCase_ = self.old_features.get("examples" , __a )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
" future run" )
else:
if mode == Split.dev:
UpperCAmelCase_ = self.processor.get_dev_examples(args.data_dir )
else:
UpperCAmelCase_ = self.processor.get_train_examples(args.data_dir )
UpperCAmelCase_ , UpperCAmelCase_ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=__a , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__a , )
UpperCAmelCase_ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , __a , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__(self : List[Any] ):
return len(self.features )
def __getitem__(self : Dict , __a : Optional[int] ):
# Convert to Tensors and build dataset
UpperCAmelCase_ = self.features[i]
UpperCAmelCase_ = torch.tensor(feature.input_ids , dtype=torch.long )
UpperCAmelCase_ = torch.tensor(feature.attention_mask , dtype=torch.long )
UpperCAmelCase_ = torch.tensor(feature.token_type_ids , dtype=torch.long )
UpperCAmelCase_ = torch.tensor(feature.cls_index , dtype=torch.long )
UpperCAmelCase_ = torch.tensor(feature.p_mask , dtype=torch.float )
UpperCAmelCase_ = torch.tensor(feature.is_impossible , dtype=torch.float )
UpperCAmelCase_ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
UpperCAmelCase_ = torch.tensor(feature.start_position , dtype=torch.long )
UpperCAmelCase_ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
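# Minimal sketch of the cache-plus-FileLock pattern used in __init__ above:
# exactly one process builds the cache while the others block on the lock and
# then simply load it. The path and build_features() helper are hypothetical.
import os
import torch
from filelock import FileLock

cache_path = '/tmp/cached_features.pt'
with FileLock(cache_path + '.lock'):
    if os.path.exists(cache_path):
        cached = torch.load(cache_path)
    else:
        cached = build_features()  # hypothetical feature builder
        torch.save(cached, cache_path)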
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
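# Hedged sketch: with the _LazyModule above, importing the package stays cheap
# and the torch-backed modeling file is only imported on first attribute access.
from transformers.models import vit_mae
vit_mae_cls = vit_mae.ViTMAEModel  # the real modeling_vit_mae import happens here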
| 310
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
def __init__(self : Dict , UpperCamelCase : str , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : Optional[int]=3 , UpperCamelCase : Optional[int]=18 , UpperCamelCase : List[Any]=30 , UpperCamelCase : Dict=400 , UpperCamelCase : Dict=True , UpperCamelCase : Optional[Any]=None , UpperCamelCase : List[Any]=True , UpperCamelCase : int=None , UpperCamelCase : Optional[int]=True , UpperCamelCase : Union[str, Any]=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , UpperCamelCase : Union[str, Any]=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , UpperCamelCase : Union[str, Any]=True , ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''height''': 224, '''width''': 224}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
lowercase__ = do_convert_rgb
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCamelCase__ (self : List[str] , UpperCamelCase : Dict=False , UpperCamelCase : Optional[int]=False , UpperCamelCase : Tuple=False ):
'''simple docstring'''
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
lowercase__ = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
lowercase__ = []
for i in range(self.batch_size ):
lowercase__ ,lowercase__ = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
lowercase__ = [Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
if torchify:
lowercase__ = [torch.from_numpy(UpperCamelCase ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class __lowerCAmelCase (lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : str = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = ChineseCLIPImageProcessingTester(self , do_center_crop=UpperCamelCase )
@property
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase , '''size''' ) )
self.assertTrue(hasattr(UpperCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase , '''center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase , '''do_convert_rgb''' ) )
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase__ = image_processing(UpperCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase__ = image_processing(UpperCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase__ = image_processing(UpperCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class __lowerCAmelCase (lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=UpperCamelCase )
lowercase__ = 3
@property
def UpperCamelCase__ (self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase , '''size''' ) )
self.assertTrue(hasattr(UpperCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase , '''center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase , '''do_convert_rgb''' ) )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase__ = image_processing(UpperCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 2
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
__snake_case = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def _A ( _lowercase = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
__UpperCamelCase = BeautifulSoup(requests.get(url + location ).content , 'html.parser' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('div' , attrs={'data-tn-component': 'organicJob'} ):
__UpperCamelCase = job.find('a' , attrs={'data-tn-element': 'jobTitle'} ).text.strip()
__UpperCamelCase = job.find('span' , {'class': 'company'} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 310
| 0
|
'''simple docstring'''
from collections.abc import Callable
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : float = a
A : float = b
if function(snake_case__ ) == 0: # one of the a or b is a root for the function
return a
elif function(snake_case__ ) == 0:
return b
elif (
function(snake_case__ ) * function(snake_case__ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('''could not find root in given interval.''' )
else:
A : float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7: # until the interval is narrower than 10^-7
if function(snake_case__ ) == 0:
return mid
elif function(snake_case__ ) * function(snake_case__ ) < 0:
A : Union[str, Any] = mid
else:
A : Optional[Any] = mid
A : Optional[int] = start + (end - start) / 2.0
return mid
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
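# Worked check of the bisection scheme above, restated self-contained because
# both definitions here are renamed to lowerCAmelCase_: f(x) = x**3 - 2*x - 5
# has f(2) = -1 and f(3) = 16, so the root lies in [2, 3] and the midpoint
# iteration converges to x ≈ 2.0945515.
def f_check(x):
    return x**3 - 2 * x - 5

low, high = 2.0, 3.0
while high - low > 1e-7:
    midpoint = low + (high - low) / 2.0
    if f_check(low) * f_check(midpoint) < 0:
        high = midpoint
    else:
        low = midpoint
assert abs(low - 2.0945514815) < 1e-6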
| 3
|
def merge_sort(collection: list) -> list:
    """Sort a list in ascending order with a top-down merge sort.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
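    # Added sanity-check sketch (not part of the original script): the result
    # should match Python's built-in sorted().
    assert merge_sort([5, 3, 1, 4, 2]) == sorted([5, 3, 1, 4, 2])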
| 310
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 4
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCamelCase (_a ):
_lowercase = 0
_lowercase = False
_lowercase = 3.0
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Any ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs(),{} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs(),{'a': 2} )
self.assertDictEqual(MockClass(a=2,b=A_ ).to_kwargs(),{'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2,c=2.2_5 ).to_kwargs(),{'a': 2, 'c': 2.2_5} )
@require_cuda
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = GradScalerKwargs(init_scale=1024,growth_factor=2 )
AcceleratorState._reset_state()
__UpperCamelCase = Accelerator(mixed_precision='fp16',kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__UpperCamelCase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale,1_0_2_4.0 )
self.assertEqual(scaler._growth_factor,2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor,0.5 )
self.assertEqual(scaler._growth_interval,2000 )
self.assertEqual(scaler._enabled,A_ )
@require_multi_gpu
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_,env=os.environ.copy() )
if __name__ == "__main__":
__snake_case = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
__snake_case = Accelerator(kwargs_handlers=[ddp_scaler])
__snake_case = torch.nn.Linear(1_0_0, 2_0_0)
__snake_case = accelerator.prepare(model)
# Check the values changed in kwargs
__snake_case = ''''''
__snake_case = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 310
| 0
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase__ ( lowerCAmelCase , unittest.TestCase):
SCREAMING_SNAKE_CASE__ = BlenderbotSmallTokenizer
SCREAMING_SNAKE_CASE__ = False
def __A (self ) -> Any:
super().setUp()
_lowercase =['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
_lowercase =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
_lowercase =['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
_lowercase ={'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
_lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase ) )
def __A (self , **UpperCAmelCase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def __A (self , UpperCAmelCase ) -> Optional[int]:
_lowercase ='''adapt act apte'''
_lowercase ='''adapt act apte'''
return input_text, output_text
def __A (self ) -> str:
_lowercase =BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowercase ='''adapt act apte'''
_lowercase =['''adapt''', '''act''', '''ap@@''', '''te''']
_lowercase =tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_lowercase =[tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_lowercase =[0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , UpperCAmelCase )
def __A (self ) -> Dict:
_lowercase =BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1_3_8_4]
_lowercase ='''I am a small frog.'''
_lowercase =tok([src_text] , padding=UpperCAmelCase , truncation=UpperCAmelCase )['''input_ids''']
_lowercase =tok.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __A (self ) -> Dict:
_lowercase =BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
_lowercase ='''I am a small frog .'''
_lowercase ='''.'''
_lowercase =tok(UpperCAmelCase )['''input_ids''']
_lowercase =tok(UpperCAmelCase )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 5
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """OwlViTImageProcessor"""
_lowercase = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self: int,A_: Tuple=None,A_: int=None,**A_: int ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.',A_,)
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(A_,A_ )
def __call__( self: str,A_: Dict=None,A_: Optional[int]=None,A_: Any=None,A_: Tuple="max_length",A_: int="np",**A_: Optional[Any] ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(A_,A_ ) or (isinstance(A_,A_ ) and not isinstance(text[0],A_ )):
__UpperCamelCase = [self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )]
elif isinstance(A_,A_ ) and isinstance(text[0],A_ ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(A_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(A_ ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(A_ ))
__UpperCamelCase = self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )
encodings.append(A_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings],dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings],dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings],axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
A_,return_tensors=A_,**A_ ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: Optional[int],*A_: int,**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process(*A_,**A_ )
def snake_case_ ( self: str,*A_: Optional[int],**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*A_,**A_ )
def snake_case_ ( self: str,*A_: Tuple,**A_: int ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*A_,**A_ )
def snake_case_ ( self: List[str],*A_: str,**A_: List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: int,*A_: Any,**A_: Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',A_,)
return self.image_processor_class
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',A_,)
return self.image_processor
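# A minimal usage sketch (added example; it assumes the upstream `transformers`
# API, which this processor mirrors, and the checkpoint name is an assumption):
#
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(
#         text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt"
#     )  # `image` is a PIL.Image loaded elsewhere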
| 310
| 0
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that the input has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str` or `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
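# A minimal usage sketch (added example; the printed output is what the upstream
# MarkupLM feature extractor produces for this input):
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#     print(encoding["nodes"])   # [['Hello world']]
#     print(encoding["xpaths"])  # [['/html/body/p']]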
| 6
|
import math
def proth(number: int) -> int:
    """Return the `number`-th Proth number (3, 5, 9, 13, 17, 25, ...).

    >>> proth(1)
    3
    >>> proth(6)
    25
    """
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)

    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Number of doubling blocks needed to generate at least `number` entries
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
| 310
| 0
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class A ( _UpperCAmelCase ):
"""simple docstring"""
def snake_case__ ( self : List[Any] )-> int:
'''simple docstring'''
A__ = tempfile.mkdtemp()
A__ = 8
# DPR tok
A__ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A__ = os.path.join(self.tmpdirname,'dpr_tokenizer' )
os.makedirs(lowercase_,exist_ok=lowercase_ )
A__ = os.path.join(lowercase_,DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
A__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
A__ = dict(zip(lowercase_,range(len(lowercase_ ) ) ) )
A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname,'bart_tokenizer' )
os.makedirs(lowercase_,exist_ok=lowercase_ )
A__ = os.path.join(lowercase_,BART_VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(lowercase_,BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(lowercase_ ) + '\n' )
with open(self.merges_file,'w',encoding='utf-8' ) as fp:
fp.write('\n'.join(lowercase_ ) )
def snake_case__ ( self : List[str] )-> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname,'dpr_tokenizer' ) )
def snake_case__ ( self : List[Any] )-> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname,'bart_tokenizer' ) )
def snake_case__ ( self : List[str] )-> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def snake_case__ ( self : str )-> Any:
'''simple docstring'''
A__ = os.path.join(self.tmpdirname,'rag_tokenizer' )
A__ = RagConfig(question_encoder=DPRConfig().to_dict(),generator=BartConfig().to_dict() )
A__ = RagTokenizer(question_encoder=self.get_dpr_tokenizer(),generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(lowercase_ )
rag_tokenizer.save_pretrained(lowercase_ )
A__ = RagTokenizer.from_pretrained(lowercase_,config=lowercase_ )
self.assertIsInstance(new_rag_tokenizer.question_encoder,lowercase_ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(),rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator,lowercase_ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab(),rag_tokenizer.generator.get_vocab() )
@slow
def snake_case__ ( self : Optional[int] )-> Union[str, Any]:
'''simple docstring'''
A__ = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
A__ = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
A__ = tokenizer(lowercase_ )
self.assertIsNotNone(lowercase_ )
@slow
def snake_case__ ( self : Optional[Any] )-> Dict:
'''simple docstring'''
A__ = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
A__ = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
A__ = tokenizer(lowercase_ )
self.assertIsNotNone(lowercase_ )
| 7
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Find scores of each token being the start and end token of an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
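# A hypothetical usage sketch (the exact query/support encoding, including the
# `sizes`/`start_token_id`/`end_token_id` entries, follows the upstream FSNER
# project and is an assumption here, not taken from this file):
#
#     model = FSNERModel()
#     p_starts, p_ends = model(W_query, W_supports)
#     # p_starts[i, j]: score of token j of query i being an entity start token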
| 310
| 0
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = os.path.abspath(SCREAMING_SNAKE_CASE__ )
logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
snake_case_ = tf.train.list_variables(SCREAMING_SNAKE_CASE__ )
snake_case_ = []
snake_case_ = []
snake_case_ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
snake_case_ = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(F'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
snake_case_ = name[1:]
# figure out how many levels deep the name is
snake_case_ = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(SCREAMING_SNAKE_CASE__ )
# read data
snake_case_ = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
names.append('''/'''.join(SCREAMING_SNAKE_CASE__ ) )
arrays.append(SCREAMING_SNAKE_CASE__ )
logger.info(F'''Read a total of {len(SCREAMING_SNAKE_CASE__ ):,} layers''' )
# Sanity check
if len(set(SCREAMING_SNAKE_CASE__ ) ) != 1:
raise ValueError(F'''Found layer names with different depths (layer depth {list(set(SCREAMING_SNAKE_CASE__ ) )})''' )
snake_case_ = list(set(SCREAMING_SNAKE_CASE__ ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = full_name.split('''/''' )
snake_case_ = model
snake_case_ = []
for i, m_name in enumerate(SCREAMING_SNAKE_CASE__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
snake_case_ = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''embeddings''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''encoder''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''layer''' )
snake_case_ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''pooler''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''token_type_embeddings''' )
else:
raise ValueError(F'''Unknown embedding layer with name {full_name}''' )
trace.append('''weight''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''attention''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''attention''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''output''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''attention''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''output''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''output''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''output''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''intermediate''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , '''weight''' )
else:
logger.warning(F'''Ignored {m_name}''' )
# for certain layers reshape is necessary
snake_case_ = '''.'''.join(SCREAMING_SNAKE_CASE__ )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , SCREAMING_SNAKE_CASE__ ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''' , SCREAMING_SNAKE_CASE__ ):
snake_case_ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
snake_case_ = array.transpose()
if pointer.shape == array.shape:
snake_case_ = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
F''' {array.shape}''' )
logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# Instantiate model
logger.info(F'''Loading model based on config from {config_path}...''' )
snake_case_ = BertConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
snake_case_ = BertModel(SCREAMING_SNAKE_CASE__ )
# Load weights from checkpoint
logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
lowerCAmelCase_ = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
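# Example invocation (added sketch; the script name and all paths below are
# placeholders, not taken from this file):
#
#     python convert_bert_tf2_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./tf2_checkpoint/bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin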
| 8
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 310
| 0
|
from math import sqrt
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' must been an int and positive"
__SCREAMING_SNAKE_CASE : str = True
# 0 and 1 are none primes.
if number <= 1:
__SCREAMING_SNAKE_CASE : Any = False
for divisor in range(2 , int(round(sqrt(lowercase__ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__SCREAMING_SNAKE_CASE : List[Any] = False
break
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'status' must been from type bool"
return status
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__SCREAMING_SNAKE_CASE : Optional[Any] = list(range(2 , n + 1 ) )
__SCREAMING_SNAKE_CASE : Optional[int] = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowercase__ ) ):
for j in range(i + 1 , len(lowercase__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__SCREAMING_SNAKE_CASE : List[Any] = 0
# filters actual prime numbers.
__SCREAMING_SNAKE_CASE : Tuple = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list"
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (n > 2), "'N' must been an int and > 2"
__SCREAMING_SNAKE_CASE : str = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowercase__ ):
ans.append(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list"
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and number >= 0, "'number' must been an int and >= 0"
__SCREAMING_SNAKE_CASE : str = [] # this list will be returns of the function.
# potential prime number factors.
__SCREAMING_SNAKE_CASE : Union[str, Any] = 2
__SCREAMING_SNAKE_CASE : Any = number
if number == 0 or number == 1:
ans.append(lowercase__ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowercase__ ):
while quotient != 1:
if is_prime(lowercase__ ) and (quotient % factor == 0):
ans.append(lowercase__ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list"
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
# prime factorization of 'number'
__SCREAMING_SNAKE_CASE : Optional[Any] = prime_factorization(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = max(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type int"
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__SCREAMING_SNAKE_CASE : int = 0
# prime factorization of 'number'
__SCREAMING_SNAKE_CASE : List[Any] = prime_factorization(lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = min(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type int"
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowercase__ ), "compare bust been from type bool"
return number % 2 == 0
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowercase__ ), "compare bust been from type bool"
return number % 2 != 0
def _UpperCamelCase ( lowercase__ ):
assert (
isinstance(lowercase__ , lowercase__ ) and (number > 2) and is_even(lowercase__ )
), "'number' must been an int, even and > 2"
__SCREAMING_SNAKE_CASE : Tuple = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__SCREAMING_SNAKE_CASE : Optional[Any] = get_prime_numbers(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = len(lowercase__ )
# run variable for while-loops.
__SCREAMING_SNAKE_CASE : List[str] = 0
__SCREAMING_SNAKE_CASE : Tuple = None
# exit variable. for break up the loops
__SCREAMING_SNAKE_CASE : Any = True
while i < len_pn and loop:
__SCREAMING_SNAKE_CASE : Tuple = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__SCREAMING_SNAKE_CASE : List[str] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and (len(lowercase__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _UpperCamelCase ( lowercase__ , lowercase__ ):
assert (
isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__SCREAMING_SNAKE_CASE : Any = 0
while numbera != 0:
__SCREAMING_SNAKE_CASE : List[Any] = numbera % numbera
__SCREAMING_SNAKE_CASE : Optional[int] = numbera
__SCREAMING_SNAKE_CASE : Optional[int] = rest
# precondition
assert isinstance(lowercase__ , lowercase__ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def _UpperCamelCase ( lowercase__ , lowercase__ ):
assert (
isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__SCREAMING_SNAKE_CASE : Dict = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__SCREAMING_SNAKE_CASE : Any = prime_factorization(lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = prime_factorization(lowercase__ )
elif numbera == 1 or numbera == 1:
__SCREAMING_SNAKE_CASE : List[str] = []
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : Optional[Any] = max(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : List[str] = 0
__SCREAMING_SNAKE_CASE : Any = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__SCREAMING_SNAKE_CASE : int = prime_fac_a.count(lowercase__ )
__SCREAMING_SNAKE_CASE : str = prime_fac_a.count(lowercase__ )
for _ in range(max(lowercase__ , lowercase__ ) ):
ans *= n
else:
__SCREAMING_SNAKE_CASE : str = prime_fac_a.count(lowercase__ )
for _ in range(lowercase__ ):
ans *= n
done.append(lowercase__ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__SCREAMING_SNAKE_CASE : str = prime_fac_a.count(lowercase__ )
for _ in range(lowercase__ ):
ans *= n
done.append(lowercase__ )
# precondition
assert isinstance(lowercase__ , lowercase__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'number' must been a positive int"
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : Union[str, Any] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowercase__ ):
ans += 1
# precondition
assert isinstance(lowercase__ , lowercase__ ) and is_prime(
lowercase__ ), "'ans' must been a prime number and from type int"
return ans
def _UpperCamelCase ( lowercase__ , lowercase__ ):
assert (
is_prime(lowercase__ ) and is_prime(lowercase__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__SCREAMING_SNAKE_CASE : Optional[Any] = p_number_a + 1 # jump to the next number
__SCREAMING_SNAKE_CASE : Optional[Any] = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowercase__ ):
number += 1
while number < p_number_a:
ans.append(lowercase__ )
number += 1
# fetch the next prime number.
while not is_prime(lowercase__ ):
number += 1
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and ans[0] != p_number_a
and ans[len(lowercase__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (n >= 1), "'n' must been int and >= 1"
__SCREAMING_SNAKE_CASE : List[str] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowercase__ )
# precondition
assert ans[0] == 1 and ans[len(lowercase__ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (
number > 1
), "'number' must been an int and >= 1"
__SCREAMING_SNAKE_CASE : str = get_divisors(lowercase__ )
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and (divisors[0] == 1)
and (divisors[len(lowercase__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _UpperCamelCase ( lowercase__ , lowercase__ ):
assert (
isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__SCREAMING_SNAKE_CASE : Optional[Any] = gcd(abs(lowercase__ ) , abs(lowercase__ ) )
# precondition
assert (
isinstance(lowercase__ , lowercase__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'n' must been a int and >= 0"
__SCREAMING_SNAKE_CASE : Any = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _UpperCamelCase ( lowercase__ ):
assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'n' must been an int and >= 0"
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1
__SCREAMING_SNAKE_CASE : Optional[int] = 1 # this will be return
for _ in range(n - 1 ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ans
ans += fiba
__SCREAMING_SNAKE_CASE : Optional[Any] = tmp
return ans
| 9
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index
            encapsulated by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`].
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
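# A minimal usage sketch (added example; because of the relative imports above,
# run it against the installed `transformers` package, which this module mirrors):
#
#     from transformers import BartConfig, DPRConfig, RagConfig
#
#     rag_config = RagConfig.from_question_encoder_generator_configs(
#         DPRConfig(), BartConfig(), n_docs=5, index_name="compressed"
#     )
#     print(rag_config.n_docs, rag_config.generator.model_type)  # 5 bart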
| 310
| 0
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the Euclidean distance between two vectors using numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the Euclidean distance between two vectors without numpy."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Benchmark both implementations with timeit."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
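    # Added sanity-check sketch (not part of the original script): both
    # implementations should agree up to floating-point error.
    assert abs(
        euclidean_distance([1, 2, 3], [4, 5, 6]) - euclidean_distance_no_np([1, 2, 3], [4, 5, 6])
    ) < 1e-9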
| 10
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool the token embeddings using the attention mask, then project
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
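# A hypothetical usage sketch (the checkpoint name below is an assumption, not
# taken from this file):
#
#     from transformers import AutoTokenizer
#
#     model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#     tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#     batch = tokenizer(["a photo of a cat"], padding=True, return_tensors="pt")
#     text_embeds, token_embeds = model(batch["input_ids"], batch["attention_mask"])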
| 310
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
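# A minimal usage sketch (added example; the sampling rate and column names are
# illustrative):
#
#     from datasets import Audio, Features, Value
#
#     features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
#     template = AutomaticSpeechRecognition()
#     template = template.align_with_features(features)
#     print(template.column_mapping)  # {'audio': 'audio', 'transcription': 'transcription'}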
| 11
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __lowerCamelCase :
_lowercase = XGLMConfig
_lowercase = {}
_lowercase = """gelu"""
def __init__( self: Optional[int],A_: Dict,A_: Any=14,A_: Optional[int]=7,A_: str=True,A_: Any=True,A_: Optional[int]=True,A_: Optional[int]=99,A_: List[str]=32,A_: Any=2,A_: Tuple=4,A_: List[str]=37,A_: Dict="gelu",A_: int=0.1,A_: List[str]=0.1,A_: int=512,A_: List[Any]=0.0_2,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = ffn_dim
__UpperCamelCase = activation_function
__UpperCamelCase = activation_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = 2
__UpperCamelCase = 1
def snake_case_ ( self: Dict ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = self.get_config()
__UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=A_,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=A_,)
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
), (
__UpperCamelCase
), (
__UpperCamelCase
), (
__UpperCamelCase
),
) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class __lowerCamelCase (_a , _a , unittest.TestCase ):
_lowercase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_lowercase = (TFXGLMForCausalLM,) if is_tf_available() else ()
_lowercase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,n_embd=37 )
def snake_case_ ( self: Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def snake_case_ ( self: Any ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __lowerCamelCase (unittest.TestCase ):
@slow
def snake_case_ ( self: Optional[Any],A_: int=True ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = tf.convert_to_tensor([[2, 268, 9865]],dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase = model.generate(A_,do_sample=A_,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(),A_ )
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase = tokenizer('Today is a nice day and',return_tensors='tf' )
__UpperCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase = model.generate(A_,do_sample=A_,seed=[7, 0] )
__UpperCamelCase = tokenizer.decode(output_ids[0],skip_special_tokens=A_ )
__UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(A_,A_ )
@slow
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = 'left'
# use different length sentences to test batching
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase = tokenizer(A_,return_tensors='tf',padding=A_ )
__UpperCamelCase = inputs['input_ids']
__UpperCamelCase = model.generate(input_ids=A_,attention_mask=inputs['attention_mask'],max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[0],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[1],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_non_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(A_,A_ )
self.assertListEqual(A_,[non_padded_sentence, padded_sentence] )
| 310
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 1_024,
'facebook/nllb-200-distilled-600M': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
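# Usage sketch (added; checkpoint name taken from the constants above):
# tokenizer = NllbTokenizerFast.from_pretrained(
#     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
# inputs = tokenizer("Hello world", return_tensors="pt")  # source tokens get the eng_Latn prefix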
| 12
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ] )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = F'''facebook/wmt19-{pair}'''
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
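# Note (added): the BLEU floors above (e.g. 26.0 for en-ru) are regression
# thresholds rather than exact scores — assertGreaterEqual only guards against
# quality regressions in the ported fairseq WMT19 checkpoints.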
| 310
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13
|
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, where h(n) = n * (2 * n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
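    # Illustrative check (added): h(n) = n * (2n - 1) gives 0, 1, 6, 15, 28 for n = 0..4.
    assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]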
| 310
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50_267,
        entity_vocab_size=500_000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 14
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = MgpstrTokenizer
_lowercase = False
_lowercase = {}
_lowercase = False
def snake_case_ ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
__UpperCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
def snake_case_ ( self: Dict,**A_: Tuple ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname,**A_ )
def snake_case_ ( self: List[Any],A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'tester'
__UpperCamelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
__UpperCamelCase = tokenizer.encode([special_token],add_special_tokens=A_ )
self.assertEqual(len(A_ ),1 )
__UpperCamelCase = tokenizer.decode(A_,skip_special_tokens=A_ )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase, __UpperCamelCase = self.get_input_output_texts(A_ )
__UpperCamelCase = tokenizer.tokenize(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertNotEqual(len(A_ ),0 )
__UpperCamelCase = tokenizer.decode(A_ )
self.assertIsInstance(A_,A_ )
self.assertEqual(text_a.replace(' ','' ),A_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
pass
| 310
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 15
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """, ROBERTA_START_DOCSTRING, )
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """, ROBERTA_START_DOCSTRING, )
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
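# Note (added): during training, `train_highway` optimizes the sum of all but
# the last highway-exit losses; at inference, a HighwayException raised inside
# the encoder carries the early exit's outputs and its layer index, and
# per-exit entropies are returned so a confidence threshold can pick an exit.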
| 310
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __lowerCamelCase (unittest.TestCase ):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case_ ( self: Dict,A_: Optional[int],A_: Tuple,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = ObjectDetectionPipeline(model=A_,image_processor=A_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case_ ( self: int,A_: Any,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 )
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
import datasets
__UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' )
__UpperCamelCase = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__UpperCamelCase = object_detector(A_,threshold=0.0 )
self.assertEqual(len(A_ ),len(A_ ) )
for outputs in batch_outputs:
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@require_torch
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=0.0 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
],threshold=0.0,)
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
],)
@require_torch
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 0.9_9_8_5
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=A_ )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
@require_torch
@require_pytesseract
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 'Narsil/layoutlmv3-finetuned-funsd'
__UpperCamelCase = 0.9_9_9_3
__UpperCamelCase = pipeline('object-detection',model=A_,threshold=A_ )
__UpperCamelCase = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
],)
| 310
| 0
|
"""simple docstring"""
import os
from collections.abc import Iterator
def _A ( UpperCamelCase_ : str = ".") -> Iterator[str]:
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(UpperCamelCase_):
__lowercase = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(UpperCamelCase_)[1] in (".py", ".ipynb"):
yield os.path.join(UpperCamelCase_, UpperCamelCase_).lstrip("./")
def _A ( UpperCamelCase_ : Any) -> Union[str, Any]:
'''simple docstring'''
return F"""{i * " "}*""" if i else "\n##"
def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str) -> str:
'''simple docstring'''
__lowercase = old_path.split(os.sep)
for i, new_part in enumerate(new_path.split(os.sep)):
if (i + 1 > len(UpperCamelCase_) or old_parts[i] != new_part) and new_part:
print(F"""{md_prefix(UpperCamelCase_)} {new_part.replace("_", " ").title()}""")
return new_path
def _A ( UpperCamelCase_ : str = ".") -> None:
'''simple docstring'''
__lowercase = ""
for filepath in sorted(good_file_paths(UpperCamelCase_)):
__lowercase ,__lowercase = os.path.split(UpperCamelCase_)
if filepath != old_path:
__lowercase = print_path(UpperCamelCase_, UpperCamelCase_)
__lowercase = (filepath.count(os.sep) + 1) if filepath else 0
__lowercase = F"""{filepath}/{filename}""".replace(" ", "%20")
__lowercase = os.path.splitext(filename.replace("_", " ").title())[0]
print(F"""{md_prefix(UpperCamelCase_)} [{filename}]({url})""")
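# Quick self-check (added; assumes the two-space nesting used in md_prefix):
# depth 0 starts a new "##" section, deeper entries become indented bullets.
assert md_prefix(0) == "\n##"
assert md_prefix(2) == "    *"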
if __name__ == "__main__":
print_directory_md('.')
| 17
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
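# Usage sketch (added): the defaults reproduce the base architecture, e.g.
# XLMRobertaConfig() has hidden_size 768 and 12 hidden layers; the ONNX config
# exposes dynamic batch/sequence axes for `input_ids` and `attention_mask`.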
| 310
| 0
|
def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangular number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 5_0_0:
            break
    return t_num
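# Worked example (added): 28 = 2^2 * 7, so count_divisors(28) returns
# (2 + 1) * (1 + 1) = 6, matching its divisors 1, 2, 4, 7, 14, 28.
assert count_divisors(28) == 6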
if __name__ == "__main__":
print(solution())
| 18
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."} )
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        } , )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 310
| 0
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
    "doc": (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the pinned version in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to the main docs with links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc", "https://huggingface.co/docs/diffusers/model_doc", )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = F'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(F'Which version are you releasing? [{default_version}]')
    if len(version) == 0:
        version = default_version
    print(F'Updating version to {version}.')
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'Which version are we developing now? [{dev_version}]')
    if len(version) == 0:
        version = dev_version
    print(F'Updating version to {version}.')
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
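# Usage sketch (added), assuming this script is saved as utils/release.py (file name assumed):
#   python utils/release.py                  # prepare a minor release
#   python utils/release.py --patch          # prepare a patch release
#   python utils/release.py --post_release   # bump back to the next .dev0 version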
| 19
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version('<' , '2.0.0' ) or not hasattr(torch , '_dynamo' ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )


def extract_model_from_parallel(model, keep_fpaa_wrapper: bool = True):
    """Extract a model from its distributed (DDP/DataParallel/DeepSpeed/compile) containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fpaa_wrapper:
        forward = getattr(model, 'forward')
        original_forward = model.__dict__.pop('_original_forward', None)
        if original_forward is not None:
            while hasattr(forward, '__wrapped__'):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, '_converted_to_transformer_engine', False):
        convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk, only from the main process (via xm.save on TPU)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set upper-cased environment variables, removing them again on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a human-readable name for an object, class or function."""
    if not hasattr(obj, '__qualname__') and not hasattr(obj, '__name__'):
        obj = getattr(obj, '__class__', obj)
    if hasattr(obj, '__qualname__'):
        return obj.__qualname__
    if hasattr(obj, '__name__'):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge nested dictionary `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port=None) -> bool:
    """Check whether a port is already in use on localhost (defaults to 29500)."""
    if port is None:
        port = 2_95_00
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(('localhost', port) ) == 0
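if __name__ == "__main__":
    # Demonstration sketch (added, not part of the original module; assumes
    # MASTER_ADDR is not already set): patch_environment exports upper-cased
    # variables for the duration of the block and removes them on exit.
    with patch_environment(master_addr="127.0.0.1"):
        assert os.environ["MASTER_ADDR"] == "127.0.0.1"
    assert "MASTER_ADDR" not in os.environ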
| 310
| 0
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("""The nodes number should be same as the number of coins""")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
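# Usage sketch (added): a root holding all 3 coins of a 3-node tree needs two
# moves — one coin pushed down to each empty child.
assert distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2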
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class __lowerCamelCase :
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowerCamelCase :
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
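# Usage sketch (hypothetical paths; the task name must be a key of
# `utils_multiple_choice.processors`, e.g. "swag"):
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag \
#       --output_dir ./swag_out \
#       --max_seq_length 80 \
#       --do_train --do_eval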
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
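# Usage sketch: 25 = 0b11001 and 32 = 0b100000, so the bitwise OR over the
# zero-padded strings is 0b111001 (decimal 57):
#   binary_or(25, 32)  ->  "0b111001"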
import os


def solution() -> int:
    """Return the total of all name scores in p022_names.txt (Project Euler 22)."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Return the second-level domain, e.g. 'github.com'."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the network location part of the URL."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Scrape e-mail addresses on the given domain reachable from ``url``."""
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
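# Usage sketch for trim_batch (toy tensors; pad_token_id=0 is an assumption):
#   ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
#   trim_batch(ids, pad_token_id=0)  ->  tensor([[5, 6], [7, 0]])
# Columns 3 and 4 hold only padding, so they are dropped batch-wide.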
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> dict:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
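# Usage sketch for the SQuAD-style metrics above (made-up strings):
#   f1_score("The cat sat.", "a cat sat")                  -> 1.0 (articles,
#       punctuation and case are stripped before the token-overlap F1)
#   calculate_exact_match(["The cat sat."], ["a cat sat"]) -> {"em": 1.0}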
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from manim import *
class Stage(Scene):
    def construct(self):
__snake_case = Rectangle(height=0.5 , width=0.5 )
__snake_case = Rectangle(height=0.2_5 , width=0.2_5 )
__snake_case = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*a__ ).arrange(a__ , buff=0 )
__snake_case = VGroup(*a__ ).arrange(a__ , buff=0 )
__snake_case = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
__snake_case = Text('''CPU''' , font_size=24 )
__snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a__ )
__snake_case = [mem.copy() for i in range(4 )]
__snake_case = VGroup(*a__ ).arrange(a__ , buff=0 )
__snake_case = Text('''GPU''' , font_size=24 )
__snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
gpu.move_to([-1, -1, 0] )
self.add(a__ )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*a__ ).arrange(a__ , buff=0 )
__snake_case = Text('''Model''' , font_size=24 )
__snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
model.move_to([3, -1.0, 0] )
self.add(a__ )
__snake_case = []
__snake_case = []
__snake_case = []
for i, rect in enumerate(a__ ):
rect.set_stroke(a__ )
__snake_case = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(a__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=a__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=a__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=a__ , buff=0.0 )
self.add(a__ )
model_cpu_arr.append(a__ )
self.add(*a__ , *a__ , *a__ )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*a__ ).arrange(a__ , buff=0 )
__snake_case = Text('''Loaded Checkpoint''' , font_size=24 )
__snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(a__ )
__snake_case = []
__snake_case = []
for i, rect in enumerate(a__ ):
__snake_case = fill.copy().set_fill(a__ , opacity=0.7 )
target.move_to(a__ )
ckpt_arr.append(a__ )
__snake_case = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(a__ )
self.add(*a__ , *a__ )
__snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__snake_case = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a__ , a__ )
__snake_case = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a__ )
__snake_case = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
__snake_case = [meta_mem.copy() for i in range(6 )]
__snake_case = [meta_mem.copy() for i in range(6 )]
__snake_case = VGroup(*a__ ).arrange(a__ , buff=0 )
__snake_case = VGroup(*a__ ).arrange(a__ , buff=0 )
__snake_case = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
__snake_case = Text('''Disk''' , font_size=24 )
__snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(a__ , run_time=3 ) , Write(a__ , run_time=1 ) , Create(a__ , run_time=1 ) )
__snake_case = []
for i, rect in enumerate(a__ ):
__snake_case = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(a__ , run_time=1.5 ) )
self.play(*a__ )
self.play(FadeOut(a__ ) )
__snake_case = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(a__ , run_time=3 ) )
self.play(
FadeOut(a__ , a__ , *a__ , *a__ ) , )
self.wait()
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
"""simple docstring"""
import requests
UpperCAmelCase__ : Optional[Any] = 'YOUR API KEY'
def lowercase_ ( _snake_case ,_snake_case = giphy_api_key ):
SCREAMING_SNAKE_CASE__ : Tuple = """+""".join(query.split() )
SCREAMING_SNAKE_CASE__ : Optional[int] = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
SCREAMING_SNAKE_CASE__ : List[Any] = requests.get(_snake_case ).json()["""data"""]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
def merge_sort(collection: list) -> list:
    """Pure implementation of the merge sort algorithm."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
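# Usage sketch:
#   merge_sort([5, 3, 1, 4, 2])  ->  [1, 2, 3, 4, 5]
#   merge_sort([])               ->  []
#   merge_sort([-2, -5, -45])    ->  [-45, -5, -2]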
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
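# Usage sketch (public OwlViT checkpoint; `image` is assumed to be a PIL image):
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")
#   # -> BatchEncoding with input_ids, attention_mask and pixel_values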
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
import math


def proth(number: int) -> int:
    """Return the ``number``-th Proth number (1-indexed): 3, 5, 9, 13, 17, 25, ..."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Number of "blocks" of Proth numbers needed to reach the requested index.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
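# Usage sketch:
#   config = CTRLConfig(n_layer=2, n_head=4)  # override a couple of defaults
#   config.hidden_size  ->  1280, resolved to n_embd through attribute_map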
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    """
    Few-shot NER model: scores candidate entity start/end positions in a query
    against start/end token embeddings pooled from the support examples.
    """

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _lowercase ( cls : Any ) -> Optional[Any]:
lowercase_ = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE_ )
@classmethod
def _lowercase ( cls : int ) -> str:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def _lowercase ( self : List[Any] ) -> str:
lowercase_ = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
lowercase_ = FlaxBertModel(SCREAMING_SNAKE_CASE_ )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
lowercase_ = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
lowercase_ = flatten_dict(unfreeze(model.params ) )
lowercase_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(SCREAMING_SNAKE_CASE_ , 1e-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(SCREAMING_SNAKE_CASE_ , repo_id='''test-model-flax''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
lowercase_ = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
lowercase_ = flatten_dict(unfreeze(model.params ) )
lowercase_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(SCREAMING_SNAKE_CASE_ , 1e-3 , msg=f'''{key} not identical''' )
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
lowercase_ = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
lowercase_ = FlaxBertModel(SCREAMING_SNAKE_CASE_ )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
lowercase_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowercase_ = flatten_dict(unfreeze(model.params ) )
lowercase_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(SCREAMING_SNAKE_CASE_ , 1e-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
SCREAMING_SNAKE_CASE_ , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
lowercase_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowercase_ = flatten_dict(unfreeze(model.params ) )
lowercase_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(SCREAMING_SNAKE_CASE_ , 1e-3 , msg=f'''{key} not identical''' )
def a ( snake_case__: Any , snake_case__: List[str] ):
'''simple docstring'''
lowercase_ = True
lowercase_ = flatten_dict(modela.params )
lowercase_ = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
lowercase_ = False
return models_are_equal
@require_flax
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : int ) -> Union[str, Any]:
lowercase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowercase_ = FlaxBertModel(SCREAMING_SNAKE_CASE_ )
lowercase_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
lowercase_ = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowercase_ = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ )
self.assertTrue(check_models_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        model = FlaxBertModel(config)

        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size='10KB')

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-subfolder'

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 30
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp(self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
        '''simple docstring'''
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 310
| 0
|
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative() -> None:
    """simple docstring"""
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast() -> None:
    """simple docstring"""
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")
def test_gen_gaussian_kernel() -> None:
    """simple docstring"""
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny() -> None:
    """simple docstring"""
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter() -> None:
    """simple docstring"""
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter() -> None:
    """simple docstring"""
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()
def test_median_filter() -> None:
    """simple docstring"""
    assert med.median_filter(gray, 3).any()
def test_sobel_filter() -> None:
    """simple docstring"""
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia() -> None:
    """simple docstring"""
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg") -> None:
    """simple docstring"""
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg") -> None:
    """simple docstring"""
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern() -> None:
    """simple docstring"""
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 31
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
    retrieval_batch_size (`int`, *optional*, defaults to 8):
        Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
        [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC )
class RagConfig(PretrainedConfig ):
    model_type = "rag"
    is_composition = True
    def __init__(self ,vocab_size=None ,is_encoder_decoder=True ,prefix=None ,bos_token_id=None ,pad_token_id=None ,eos_token_id=None ,decoder_start_token_id=None ,title_sep=" / " ,doc_sep=" // " ,n_docs=5 ,max_combined_length=300 ,retrieval_vector_size=768 ,retrieval_batch_size=8 ,dataset="wiki_dpr" ,dataset_split="train" ,index_name="compressed" ,index_path=None ,passages_path=None ,exclude_bos_score=False ,reduce_loss=False ,label_smoothing=0.0 ,do_deduplication=True ,use_dummy_dataset=False ,do_marginalize=False ,output_retrieved=False ,use_cache=True ,forced_eos_token_id=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(
            bos_token_id=bos_token_id ,pad_token_id=pad_token_id ,eos_token_id=eos_token_id ,decoder_start_token_id=decoder_start_token_id ,forced_eos_token_id=forced_eos_token_id ,is_encoder_decoder=is_encoder_decoder ,prefix=prefix ,vocab_size=vocab_size ,**kwargs ,)
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('question_encoder' )
        question_encoder_model_type = question_encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('generator' )
        decoder_model_type = decoder_config.pop('model_type' )

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type ,**question_encoder_config )
        self.generator = AutoConfig.for_model(decoder_model_type ,**decoder_config )

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator ,'forced_eos_token_id' ,None )

    @classmethod
    def from_question_encoder_generator_configs(cls ,question_encoder_config: PretrainedConfig ,generator_config: PretrainedConfig ,**kwargs ):
        '''simple docstring'''
        return cls(question_encoder=question_encoder_config.to_dict() ,generator=generator_config.to_dict() ,**kwargs )

    def to_dict(self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['question_encoder'] = self.question_encoder.to_dict()
        output['generator'] = self.generator.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
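# Usage sketch (added for illustration; not executable standalone because of the relative
# imports above): composing a RagConfig from two sub-configs via the classmethod above.
# The sub-config classes are standard transformers configs, instantiated locally so no
# checkpoint download is needed.
#
#     from transformers import BartConfig, DPRConfig, RagConfig
#
#     rag_config = RagConfig.from_question_encoder_generator_configs(
#         DPRConfig(), BartConfig(), n_docs=5, index_name="compressed"
#     )
#     assert rag_config.to_dict()["question_encoder"]["model_type"] == "dpr"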
| 310
| 0
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    """simple docstring"""
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder(root: Node | None ) -> list[int]:
    """simple docstring"""
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root: Node | None ) -> list[int]:
    """simple docstring"""
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root: Node | None ) -> list[int]:
    """simple docstring"""
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root: Node | None ) -> int:
    """simple docstring"""
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order(root: Node | None ) -> Sequence[Node | None]:
    """simple docstring"""
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )

        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right(root: Node | None , level: int ) -> Sequence[Node | None]:
    """simple docstring"""
    output: list[Any] = []

    def populate_output(root: Node | None , level: int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )

    populate_output(root , level )
    return output
def get_nodes_from_right_to_left(root: Node | None , level: int ) -> Sequence[Node | None]:
    """simple docstring"""
    output: list[Any] = []

    def populate_output(root: Node | None , level: int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )

    populate_output(root , level )
    return output
def zigzag(root: Node | None ) -> Sequence[Node | None] | list[Any]:
    """simple docstring"""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root )

    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0

    return output
def main() -> None:  # Main function for testing.
    """simple docstring"""
    root = make_tree()
    print(F"""In-order Traversal: {inorder(root )}""" )
    print(F"""Pre-order Traversal: {preorder(root )}""" )
    print(F"""Post-order Traversal: {postorder(root )}""" , '\n' )

    print(F"""Height of Tree: {height(root )}""" , '\n' )

    print('Complete Level Order Traversal: ' )
    print(level_order(root ) , '\n' )

    print('Level-wise order Traversal: ' )

    for level in range(1 , height(root ) + 1 ):
        print(F"""Level {level}:""" , get_nodes_from_left_to_right(root , level=level ) )

    print('\nZigZag order Traversal: ' )
    print(zigzag(root ) )
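def inorder_iterative(root: Node | None ) -> list[int]:
    """Added for illustration (not part of the original module): the recursive `inorder`
    above rewritten with an explicit stack, which avoids Python's recursion-depth limit
    on very deep, degenerate trees."""
    output: list[int] = []
    stack: list[Node] = []
    node = root
    while stack or node:
        while node:  # walk to the leftmost node, remembering the path
            stack.append(node )
            node = node.left
        node = stack.pop()  # visit the node ...
        output.append(node.data )
        node = node.right  # ... then descend into its right subtree
    return output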
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 32
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig ):
    model_type = "M-CLIP"

    def __init__(self ,transformerDimSize=1024 ,imageDimSize=768 ,**kwargs ):
        '''simple docstring'''
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )


class MultilingualCLIP(PreTrainedModel ):
    config_class = MCLIPConfig

    def __init__(self ,config ,*args ,**kwargs ):
        '''simple docstring'''
        super().__init__(config ,*args ,**kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions ,out_features=config.numDims )

    def forward(self ,input_ids ,attention_mask ):
        '''simple docstring'''
        embs = self.transformer(input_ids=input_ids ,attention_mask=attention_mask )[0]
        pooled = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(pooled ), embs
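if __name__ == "__main__":
    # Standalone sketch (added for illustration; not part of the original module) of the
    # attention-masked mean pooling used in `forward` above: padded positions are zeroed
    # out before averaging, so the pooled sentence vector ignores padding tokens.
    embs = torch.randn(2, 4, 8)  # (batch, seq_len, hidden)
    attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
    pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
    print(pooled.shape)  # torch.Size([2, 8])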
| 310
| 0
|
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str, num_meta4D_last_stage: int) -> str:
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split('''.''' )

        if layer == "0":
            new_name = old_name.replace('''0''' , '''convolution1''' )
        elif layer == "1":
            new_name = old_name.replace('''1''' , '''batchnorm_before''' )
        elif layer == "3":
            new_name = old_name.replace('''3''' , '''convolution2''' )
        else:
            new_name = old_name.replace('''4''' , '''batchnorm_after''' )

    if "network" in old_name and re.search(r'''\d\.\d''' , old_name ):
        two_digit_num = r'''\b\d{2}\b'''
        if bool(re.search(two_digit_num , old_name ) ):
            match = re.search(r'''\d\.\d\d.''' , old_name ).group()
        else:
            match = re.search(r'''\d\.\d.''' , old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match , '''''' )
            trimmed_name = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
            new_name = '''intermediate_stages.''' + trimmed_name
        else:
            trimmed_name = old_name.replace(match , '''''' )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace('''norm1''' , '''layernorm1''' )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace('''norm2''' , '''layernorm2''' )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace('''fc1''' , '''linear_in''' )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace('''fc2''' , '''linear_out''' )

            new_name = '''last_stage.''' + trimmed_name

    elif "network" in old_name and re.search(r'''.\d.''' , old_name ):
        new_name = old_name.replace('''network''' , '''intermediate_stages''' )

    if "fc" in new_name:
        new_name = new_name.replace('''fc''' , '''convolution''' )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('''norm1''' , '''batchnorm_before''' )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('''norm2''' , '''batchnorm_after''' )
    if "proj" in new_name:
        new_name = new_name.replace('''proj''' , '''projection''' )
    if "dist_head" in new_name:
        new_name = new_name.replace('''dist_head''' , '''distillation_classifier''' )
    elif "head" in new_name:
        new_name = new_name.replace('''head''' , '''classifier''' )
    elif "patch_embed" in new_name:
        new_name = '''efficientformer.''' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('''norm''' , '''layernorm''' )
        new_name = '''efficientformer.''' + new_name
    else:
        new_name = '''efficientformer.encoder.''' + new_name
    return new_name
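# Hand-traced examples of the mapping above (added for illustration; derived by following
# the branches of `rename_key`, not taken from the original script):
#     rename_key("patch_embed.0.weight", 1) -> "efficientformer.patch_embed.convolution1.weight"
#     rename_key("head.weight", 1)          -> "classifier.weight"
#     rename_key("dist_head.weight", 1)     -> "distillation_classifier.weight"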
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict , num_meta4D_last_stage )

    model.load_state_dict(new_state_dict )
    model.eval()

    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 2_5_6
    crop_size = 2_2_4
    processor = EfficientFormerImageProcessor(
        size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
    pixel_values = processor(images=image , return_tensors='''pt''' ).pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size , interpolation=pillow_resamplings['''bicubic'''] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN , IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )

    assert torch.allclose(original_pixel_values , pixel_values )

    outputs = model(pixel_values )
    logits = outputs.logits

    expected_shape = (1, 1_0_0_0)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :1_0] , expected_logits , atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :1_0] , expected_logits , atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )

    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(F'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
    processor.save_pretrained(pytorch_dump_path )
    print(F'''Processor successfully saved at {pytorch_dump_path}''' )

    if push_to_hub:
        print('''Pushing model to the hub...''' )

        model.push_to_hub(
            repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=True , )
        processor.push_to_hub(
            repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 33
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__(self ,parent ,batch_size=14 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_labels=True ,vocab_size=99 ,d_model=32 ,num_hidden_layers=2 ,num_attention_heads=4 ,ffn_dim=37 ,activation_function="gelu" ,activation_dropout=0.1 ,attention_dropout=0.1 ,max_position_embeddings=512 ,initializer_range=0.0_2 ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self ):
        '''simple docstring'''
        return XGLMConfig.from_pretrained('facebook/xglm-564M' )
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 )

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self ):
        '''simple docstring'''
        return XGLMConfig(
            vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=True,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=True,)
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
    )
_lowercase = False
_lowercase = False
_lowercase = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self,config_class=XGLMConfig,n_embd=37 )
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
    def test_resize_token_embeddings(self ):
        '''simple docstring'''
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase ):
@slow
    def test_lm_generate_xglm(self ,verify_outputs=True ):
        '''simple docstring'''
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        input_ids = tf.convert_to_tensor([[2, 268, 9865]],dtype=tf.int32 )  # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
        output_ids = model.generate(input_ids,do_sample=False,num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(),expected_output_ids )
@slow
    def test_xglm_sample(self ):
        '''simple docstring'''
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )

        tf.random.set_seed(0 )
        tokenized = tokenizer('Today is a nice day and',return_tensors='tf' )
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0' ):
            output_ids = model.generate(input_ids,do_sample=True,seed=[7, 0] )
        output_str = tokenizer.decode(output_ids[0],skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
        self.assertEqual(output_str,EXPECTED_OUTPUT_STR )
@slow
    def test_batch_generation(self ):
        '''simple docstring'''
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )

        tokenizer.padding_side = 'left'
# use different length sentences to test batching
        sentences = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
        inputs = tokenizer(sentences,return_tensors='tf',padding=True )
        input_ids = inputs['input_ids']

        outputs = model.generate(input_ids=input_ids,attention_mask=inputs['attention_mask'],max_new_tokens=12 )

        inputs_non_padded = tokenizer(sentences[0],return_tensors='tf' ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded,max_new_tokens=12 )

        inputs_padded = tokenizer(sentences[1],return_tensors='tf' ).input_ids
        output_padded = model.generate(input_ids=inputs_padded,max_new_tokens=12 )

        batch_out_sentence = tokenizer.batch_decode(outputs,skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0],skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0],skip_special_tokens=True )
        expected_output_sentence = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
        self.assertListEqual(expected_output_sentence,batch_out_sentence )
        self.assertListEqual(expected_output_sentence,[non_padded_sentence, padded_sentence] )
| 310
| 0
|
'''simple docstring'''
def is_power_of_two(number: int ) -> bool:
    if number < 0:
        raise ValueError('''number must not be negative''' )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
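    # Quick demonstration (added for illustration): n & (n - 1) clears the lowest set
    # bit, so the expression is zero exactly when at most one bit is set.
    for n in (0, 1, 2, 3, 4, 12, 16):
        print(n, is_power_of_two(n))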
| 34
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
    bleu_data = json.load(f)
@require_torch
class __lowerCamelCase (unittest.TestCase ):
    def get_tokenizer(self,mname ):
        '''simple docstring'''
        return FSMTTokenizer.from_pretrained(mname )

    def get_model(self,mname ):
        '''simple docstring'''
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 2_6.0],
['ru-en', 2_2.0],
['en-de', 2_2.0],
['de-en', 2_9.0],
] )
@slow
    def test_bleu_scores(self,pair,min_bleu_score ):
        '''simple docstring'''
        mname = F'''facebook/wmt19-{pair}'''
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )

        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']

        batch = tokenizer(src_sentences,return_tensors='pt',truncation=True,padding='longest' ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids,num_beams=8,)
        decoded_sentences = tokenizer.batch_decode(
            outputs,skip_special_tokens=True,clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences,tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores['bleu'],min_bleu_score )
| 310
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
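# Note (added for illustration): because of `_LazyModule`, importing this package is
# cheap; heavy dependencies such as torch load only on first attribute access, e.g.
#     from transformers.models.glpn import GLPNForDepthEstimation  # triggers the torch import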
| 35
|
def hexagonal_numbers(length: int ) -> list[int]:
    """simple docstring"""
    if length <= 0 or not isinstance(length , int ):
        raise ValueError('Length must be a positive integer.' )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
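    # Sanity check (added for illustration): consecutive hexagonal numbers differ by
    # 4*n + 1, since (n + 1)(2n + 1) - n(2n - 1) = 4n + 1.
    nums = hexagonal_numbers(length=6)
    assert all(b - a == 4 * i + 1 for i, (a, b) in enumerate(zip(nums, nums[1:])))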
| 310
| 0
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: str = "<unk>", eos_token: str = "</s>", pad_token: str = "<pad>", ):
        '''simple docstring'''
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ])
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}", special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True, ):
        '''simple docstring'''
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True, ):
        '''simple docstring'''
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        '''simple docstring'''
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
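if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the original module):
    # train a small Unigram vocabulary from an in-memory corpus and tokenize a sentence.
    # The corpus and vocab_size are arbitrary values chosen only for the demo, and the
    # snippet assumes a recent `tokenizers` release.
    corpus = ["Hello world!", "SentencePiece-style unigram tokenization.", "Hello again."]
    tokenizer = SentencePieceUnigramTokenizer()
    tokenizer.train_from_iterator(corpus, vocab_size=100, show_progress=False)
    print(tokenizer.encode("Hello world!").tokens)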
| 36
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self ):
'''simple docstring'''
super().setUp()
# fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
        vocab_tokens = dict(zip(vocab,range(len(vocab ) ) ) )

        self.vocab_file = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
    def get_tokenizer(self,**kwargs ):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname,**kwargs )
    def get_input_output_texts(self,tokenizer ):
        '''simple docstring'''
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
    def test_added_tokens_do_lower_case(self ):
        '''simple docstring'''
        pass
    def test_add_special_tokens(self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                special_token = '[SPECIAL_TOKEN]'

                tokenizer.add_special_tokens({'cls_token': special_token} )
                encoded_special_token = tokenizer.encode([special_token],add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ),1 )

                decoded = tokenizer.decode(encoded_special_token,skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
    def test_internal_consistency(self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                input_text, output_text = self.get_input_output_texts(tokenizer )

                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text,add_special_tokens=False )
                self.assertListEqual(ids,ids_a )

                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ),0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a,str )

                self.assertEqual(text_a.replace(' ','' ),output_text )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
    def test_maximum_encoding_length_pair_input(self ):
        '''simple docstring'''
        pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
    def test_pretokenized_inputs(self ):
        '''simple docstring'''
        pass
| 310
| 0
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
    def setUp(self ) -> Optional[Any]:
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding="""utf-8""" ,check=True ,)

        assert hasattr(self ,"""env""" )
    def create_estimator(self ,instance_count ) -> Optional[Any]:
        job_name = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
        # distributed data settings
        distribution = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=job_name ,instance_count=instance_count ,instance_type=self.instance_type ,debugger_hook_config=False ,hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=distribution ,py_version="""py36""" ,)
    def save_results_as_csv(self ,job_name ) -> Optional[Any]:
        TrainingJobAnalytics(job_name ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
    def test_script(self ,instance_count ) -> Any:
        # create estimator
        estimator = self.create_estimator(instance_count )

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,99_9999 )
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss )

        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" ,"""w""" ) as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,outfile )
| 37
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , _a , )
class DeeRobertaModel(DeeBertModel ):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self ,config ):
        '''simple docstring'''
        super().__init__(config )

        self.embeddings = RobertaEmbeddings(config )
        self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , _a , )
class DeeRobertaForSequenceClassification(BertPreTrainedModel ):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self ,config ):
        '''simple docstring'''
        super().__init__(config )
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size ,self.config.num_labels )
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING )
    def forward(self ,input_ids=None ,attention_mask=None ,token_type_ids=None ,position_ids=None ,head_mask=None ,inputs_embeds=None ,labels=None ,output_layer=-1 ,train_highway=False ,):
        '''simple docstring'''
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids ,attention_mask=attention_mask ,token_type_ids=token_type_ids ,position_ids=position_ids ,head_mask=head_mask ,inputs_embeds=inputs_embeds ,)

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output )
            logits = self.classifier(pooled_output )
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits )
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1 ) ,labels.view(-1 ) )
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ) ,labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
                highway_losses.append(highway_loss )

            if train_highway:
                outputs = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 310
| 0
|
from __future__ import annotations
from math import pi
def ind_reactance( inductance : float , frequency : float , reactance : float ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
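    # Worked example (added for illustration): a 10 mH inductor driven at 50 Hz has an
    # inductive reactance of X_L = 2 * pi * f * L = 2 * pi * 50 * 0.01 ≈ 3.14 ohm.
    print(ind_reactance(inductance=0.01, frequency=50, reactance=0))  # {'reactance': 3.14...}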
| 38
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args ,**kwargs ):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self,model,tokenizer,processor ):
        '''simple docstring'''
        object_detector = ObjectDetectionPipeline(model=model,image_processor=processor )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self,object_detector,examples ):
        '''simple docstring'''
        outputs = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 )

        self.assertGreater(len(outputs ),0 )
        for detected_object in outputs:
            self.assertEqual(
                detected_object,{
                    'score': ANY(float ),
                    'label': ANY(str ),
                    'box': {'xmin': ANY(int ), 'ymin': ANY(int ), 'xmax': ANY(int ), 'ymax': ANY(int )},
                },)

        import datasets

        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' )

        batch = [
            Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            # RGBA
            dataset[0]['file'],
            # LA
            dataset[1]['file'],
            # L
            dataset[2]['file'],
        ]
        batch_outputs = object_detector(batch,threshold=0.0 )

        self.assertEqual(len(batch ),len(batch_outputs ) )
        for outputs in batch_outputs:
            self.assertGreater(len(outputs ),0 )
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,{
                        'score': ANY(float ),
                        'label': ANY(str ),
                        'box': {'xmin': ANY(int ), 'ymin': ANY(int ), 'xmax': ANY(int ), 'ymax': ANY(int )},
                    },)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@require_torch
    def test_small_model_pt(self ):
        '''simple docstring'''
        model_id = 'hf-internal-testing/tiny-detr-mobilenetsv3'

        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model,feature_extractor=feature_extractor )

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=0.0 )

        self.assertEqual(
            nested_simplify(outputs,decimals=4 ),[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],)
        batch_outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ],threshold=0.0,)
        self.assertEqual(
            nested_simplify(batch_outputs,decimals=4 ),[
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
],)
@require_torch
@slow
    def test_large_model_pt(self ):
        '''simple docstring'''
        model_id = 'facebook/detr-resnet-50'

        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model,feature_extractor=feature_extractor )

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
        self.assertEqual(
            nested_simplify(outputs,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
        batch_outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ] )
        self.assertEqual(
            nested_simplify(batch_outputs,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
    def test_integration_torch_object_detection(self ):
        '''simple docstring'''
        model_id = 'facebook/detr-resnet-50'

        object_detector = pipeline('object-detection',model=model_id )

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
        self.assertEqual(
            nested_simplify(outputs,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 0.9_9_8_5
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=A_ )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
@require_torch
@require_pytesseract
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 'Narsil/layoutlmv3-finetuned-funsd'
__UpperCamelCase = 0.9_9_9_3
__UpperCamelCase = pipeline('object-detection',model=A_,threshold=A_ )
__UpperCamelCase = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
],)
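# Hedged usage sketch (not part of the test class above): the same pipeline
# invoked directly. It assumes only the `transformers` pipeline API plus the
# checkpoint and image URL already exercised by the slow tests.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    for det in detector("http://images.cocodataset.org/val2017/000000039769.jpg"):
        # Each prediction is a dict with a float 'score', a string 'label'
        # and a pixel-space 'box' of xmin/ymin/xmax/ymax, as asserted above.
        print(f"{det['label']:>8} {det['score']:.4f} {det['box']}")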
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class __lowerCamelCase (_a ):
_lowercase = """xlm-roberta"""
def __init__( self: Union[str, Any],A_: Union[str, Any]=3_0522,A_: Dict=768,A_: Union[str, Any]=12,A_: Any=12,A_: str=3072,A_: Union[str, Any]="gelu",A_: str=0.1,A_: Optional[int]=0.1,A_: List[Any]=512,A_: Optional[Any]=2,A_: Dict=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=1,A_: str=0,A_: str=2,A_: Optional[Any]="absolute",A_: Union[str, Any]=True,A_: int=None,**A_: Optional[Any],):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
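# Hedged illustration of what the `inputs` property above yields for the
# default (non multiple-choice) task:
#
#     OrderedDict([('input_ids',      {0: 'batch', 1: 'sequence'}),
#                  ('attention_mask', {0: 'batch', 1: 'sequence'})])
#
# The string-valued axes mark the batch and sequence dimensions as dynamic
# when the model is exported to ONNX.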
"""simple docstring"""
class Node:
    """simple docstring"""
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder(root, res) -> None:
    '''simple docstring'''
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)
def tree_sort(arr) -> list:
    '''simple docstring'''
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase (_a ):
_lowercase = field(default=_a , metadata={"""help""": """Whether to use SortishSampler or not."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(A_,A_ ):
__UpperCamelCase = v.to_dict()
return d
'''simple docstring'''
import os
import sys
import unittest
_A : List[str] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_A : Dict =os.path.join(git_repo_path, '''src''', '''transformers''')
_A : Dict ='''
{0} = None
'''
_A : Dict ='''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
_A : List[str] ='''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : int = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(UpperCamelCase__ , """tokenizers""" )
lowerCamelCase__ : Tuple = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(UpperCamelCase__ , """tensorflow_text""" )
lowerCamelCase__ : Dict = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tokenizers""" )
lowerCamelCase__ : Union[str, Any] = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tensorflow_text""" )
lowerCamelCase__ : str = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(UpperCamelCase__ , """sentencepiece_and_tokenizers_and_vision""" )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Tuple = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , UpperCamelCase__ )
self.assertIn("""tensorflow_text""" , UpperCamelCase__ )
self.assertIn("""sentencepiece_and_tokenizers""" , UpperCamelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertModel""" , objects["""tf"""] )
self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Tuple = create_dummy_object("""CONSTANT""" , """'torch'""" )
self.assertEqual(UpperCamelCase__ , """\nCONSTANT = None\n""" )
lowerCamelCase__ : List[str] = create_dummy_object("""function""" , """'torch'""" )
self.assertEqual(
UpperCamelCase__ , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
lowerCamelCase__ : List[str] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
"""
lowerCamelCase__ : int = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : List[Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
lowerCamelCase__ : Any = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , UpperCamelCase__ )
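# Hedged sketch of what the generated dummies do at runtime: instantiating a
# dummy class (or calling a dummy function) in an environment that is missing
# its backend routes through `requires_backends`, which raises an ImportError
# pointing the user at the missing dependency -- the dummies carry no other
# behaviour.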
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _A ( _lowercase ) -> Dict:
"""simple docstring"""
if is_torch_version('<' , '2.0.0' ) or not hasattr(_lowercase , '_dynamo' ):
return False
return isinstance(_lowercase , torch._dynamo.eval_frame.OptimizedModule )
def _A ( _lowercase , _lowercase = True ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__UpperCamelCase = is_compiled_module(_lowercase )
if is_compiled:
__UpperCamelCase = model
__UpperCamelCase = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_lowercase , _lowercase ):
__UpperCamelCase = model.module
if not keep_fpaa_wrapper:
__UpperCamelCase = getattr(_lowercase , 'forward' )
__UpperCamelCase = model.__dict__.pop('_original_forward' , _lowercase )
if original_forward is not None:
while hasattr(_lowercase , '__wrapped__' ):
__UpperCamelCase = forward.__wrapped__
if forward == original_forward:
break
__UpperCamelCase = forward
if getattr(_lowercase , '_converted_to_transformer_engine' , _lowercase ):
convert_model(_lowercase , to_transformer_engine=_lowercase )
if is_compiled:
__UpperCamelCase = model
__UpperCamelCase = compiled_model
return model
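# Hedged usage sketch for the unwrapping helper above: given
#     ddp_model = torch.nn.parallel.DistributedDataParallel(model)
# the helper walks `.module` attributes until the bare `model` is reached,
# optionally restores the pre-mixed-precision `forward`, and re-attaches the
# torch.compile wrapper when the input was a compiled module.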
def _A ( ) -> Any:
"""simple docstring"""
PartialState().wait_for_everyone()
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_lowercase , _lowercase )
elif PartialState().local_process_index == 0:
torch.save(_lowercase , _lowercase )
@contextmanager
def _A ( **_lowercase ) -> Union[str, Any]:
"""simple docstring"""
for key, value in kwargs.items():
__UpperCamelCase = str(_lowercase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _A ( _lowercase ) -> Tuple:
"""simple docstring"""
if not hasattr(_lowercase , '__qualname__' ) and not hasattr(_lowercase , '__name__' ):
__UpperCamelCase = getattr(_lowercase , '__class__' , _lowercase )
if hasattr(_lowercase , '__qualname__' ):
return obj.__qualname__
if hasattr(_lowercase , '__name__' ):
return obj.__name__
return str(_lowercase )
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
for key, value in source.items():
if isinstance(_lowercase , _lowercase ):
__UpperCamelCase = destination.setdefault(_lowercase , {} )
merge_dicts(_lowercase , _lowercase )
else:
__UpperCamelCase = value
return destination
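# Worked example for the recursive merge above (source merged into destination):
#     merge_dicts({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}})
# mutates the destination into {'b': {'y': 2, 'x': 1}, 'a': 1} -- nested
# dicts are merged key by key rather than overwritten wholesale.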
def _A ( _lowercase = None ) -> bool:
"""simple docstring"""
if port is None:
__UpperCamelCase = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
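# Hedged usage sketch for the port probe above (29500 is the default checked
# when no port is given): a return value of True means something is already
# listening on localhost, so a distributed launch should pick another port.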
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __UpperCAmelCase ( unittest.TestCase ):
__lowercase = StableDiffusionLDMaDPipeline
__lowercase = TEXT_TO_IMAGE_PARAMS
__lowercase = TEXT_TO_IMAGE_BATCH_PARAMS
__lowercase = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
_snake_case = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , )
torch.manual_seed(0 )
_snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
_snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_snake_case = CLIPTextModel(lowerCAmelCase_ )
_snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_snake_case = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ):
"""simple docstring"""
if str(lowerCAmelCase_ ).startswith('mps' ):
_snake_case = torch.manual_seed(lowerCAmelCase_ )
else:
_snake_case = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionLDMaDPipeline(**lowerCAmelCase_ )
_snake_case = ldmad_pipe.to(lowerCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case = self.get_dummy_inputs(lowerCAmelCase_ )
_snake_case = ldmad_pipe(**lowerCAmelCase_ )
_snake_case , _snake_case = output.rgb, output.depth
_snake_case = rgb[0, -3:, -3:, -1]
_snake_case = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_snake_case = np.array(
[0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] )
_snake_case = np.array([103.46727, 85.812004, 87.849236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionLDMaDPipeline(**lowerCAmelCase_ )
_snake_case = ldmad_pipe.to(lowerCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case = self.get_dummy_inputs(lowerCAmelCase_ )
_snake_case = 3 * [inputs['prompt']]
# forward
_snake_case = ldmad_pipe(**lowerCAmelCase_ )
_snake_case , _snake_case = output.rgb, output.depth
_snake_case = rgb_slice_a[0, -3:, -3:, -1]
_snake_case = depth_slice_a[0, -3:, -1]
_snake_case = self.get_dummy_inputs(lowerCAmelCase_ )
_snake_case = 3 * [inputs.pop('prompt' )]
_snake_case = ldmad_pipe.tokenizer(
lowerCAmelCase_ , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors='pt' , )
_snake_case = text_inputs['input_ids'].to(lowerCAmelCase_ )
_snake_case = ldmad_pipe.text_encoder(lowerCAmelCase_ )[0]
_snake_case = prompt_embeds
# forward
_snake_case = ldmad_pipe(**lowerCAmelCase_ )
_snake_case , _snake_case = output.rgb, output.depth
_snake_case = rgb_slice_a[0, -3:, -3:, -1]
_snake_case = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
_snake_case = StableDiffusionLDMaDPipeline(**lowerCAmelCase_ )
_snake_case = ldmad_pipe.to(lowerCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case = self.get_dummy_inputs(lowerCAmelCase_ )
_snake_case = 'french fries'
_snake_case = ldmad_pipe(**lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ )
_snake_case , _snake_case = output.rgb, output.depth
_snake_case = rgb[0, -3:, -3:, -1]
_snake_case = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_snake_case = np.array(
[0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] )
_snake_case = np.array([107.84738, 84.62802, 89.962135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="cpu" , lowerCAmelCase_=torch.floataa , lowerCAmelCase_=0 ):
"""simple docstring"""
_snake_case = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_snake_case = np.random.RandomState(lowerCAmelCase_ ).standard_normal((1, 4, 64, 64) )
_snake_case = torch.from_numpy(lowerCAmelCase_ ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
_snake_case = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
_snake_case = ldmad_pipe.to(lowerCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case = self.get_inputs(lowerCAmelCase_ )
_snake_case = ldmad_pipe(**lowerCAmelCase_ )
_snake_case , _snake_case = output.rgb, output.depth
_snake_case = rgb[0, -3:, -3:, -1].flatten()
        _snake_case = depth[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12)
_snake_case = np.array(
[0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] )
_snake_case = np.array(
[0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="cpu" , lowerCAmelCase_=torch.floataa , lowerCAmelCase_=0 ):
"""simple docstring"""
_snake_case = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_snake_case = np.random.RandomState(lowerCAmelCase_ ).standard_normal((1, 4, 64, 64) )
_snake_case = torch.from_numpy(lowerCAmelCase_ ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
_snake_case = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(lowerCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case = self.get_inputs(lowerCAmelCase_ )
_snake_case = ldmad_pipe(**lowerCAmelCase_ )
_snake_case , _snake_case = output.rgb, output.depth
_snake_case = 0.495586
_snake_case = 0.33795515
_snake_case = 112.48518
_snake_case = 98.489746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(lowerCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_snake_case = self.get_inputs(lowerCAmelCase_ )
_snake_case = ldmad_pipe(**lowerCAmelCase_ )
_snake_case , _snake_case = output.rgb, output.depth
_snake_case = 0.4194127
_snake_case = 0.35375586
_snake_case = 0.5638502
_snake_case = 0.34686103
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
_lowercase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
_lowercase = field(metadata={"""help""": """Should contain the data files for the task."""} )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _A ( ) -> str:
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _lowercase )
# Set seed
set_seed(training_args.seed )
try:
__UpperCamelCase = processors[data_args.task_name]()
__UpperCamelCase = processor.get_labels()
__UpperCamelCase = len(_lowercase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(_lowercase ) -> Dict:
__UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(_lowercase , p.label_ids )}
# Data collator
__UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(_lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
results.update(_lowercase )
return results
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
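# Hedged invocation sketch (all paths and the task name are placeholders, not
# taken from the source):
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./data --max_seq_length 128 \
#       --output_dir ./out --do_train --do_eval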
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowercase = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''LayoutLMv2FeatureExtractor''']
__lowercase = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
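# Hedged sketch of what the lazy-module wiring above buys: the heavy
# submodules are imported only when first touched, so
#     from transformers.models.layoutlmv2 import LayoutLMv2Config
# pays for the configuration module alone, deferring the torch- and
# vision-dependent imports until those symbols are actually accessed.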
import os
def solution() -> int:
    """simple docstring"""
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
    names = names.replace('"', '').split(',')
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
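# Worked example (from the problem this solves, Project Euler 22): "COLIN"
# has alphabetical value 3 + 15 + 12 + 9 + 14 = 53; as the 938th name in the
# sorted list it contributes 938 * 53 = 49714 to the total.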
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_a : List[Any] = logging.get_logger(__name__)
# General docstring
_a : Optional[int] = 'RegNetConfig'
# Base docstring
_a : Union[str, Any] = 'facebook/regnet-y-040'
_a : Tuple = [1, 1_088, 7, 7]
# Image classification docstring
_a : Tuple = 'facebook/regnet-y-040'
_a : Union[str, Any] = 'tabby, tabby cat'
_a : Union[str, Any] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ = 3 , a__ = 1 , a__ = 1 , a__ = "relu" , ):
super().__init__()
_lowerCAmelCase : str = nn.Convad(
a__ , a__ , kernel_size=a__ , stride=a__ , padding=kernel_size // 2 , groups=a__ , bias=a__ , )
_lowerCAmelCase : str = nn.BatchNormad(a__ )
_lowerCAmelCase : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def __A ( self , a__ ):
_lowerCAmelCase : List[str] = self.convolution(a__ )
_lowerCAmelCase : Union[str, Any] = self.normalization(a__ )
_lowerCAmelCase : List[Any] = self.activation(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Optional[int] = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
_lowerCAmelCase : List[Any] = config.num_channels
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
_lowerCAmelCase : List[Any] = self.embedder(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ = 2 ):
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Convad(a__ , a__ , kernel_size=1 , stride=a__ , bias=a__ )
_lowerCAmelCase : Optional[int] = nn.BatchNormad(a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = self.convolution(a__ )
_lowerCAmelCase : Tuple = self.normalization(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ ):
super().__init__()
_lowerCAmelCase : Dict = nn.AdaptiveAvgPoolad((1, 1) )
_lowerCAmelCase : Tuple = nn.Sequential(
nn.Convad(a__ , a__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(a__ , a__ , kernel_size=1 ) , nn.Sigmoid() , )
def __A ( self , a__ ):
# b c h w -> b c 1 1
_lowerCAmelCase : Tuple = self.pooler(a__ )
_lowerCAmelCase : int = self.attention(a__ )
_lowerCAmelCase : Optional[Any] = hidden_state * attention
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ , a__ = 1 ):
super().__init__()
_lowerCAmelCase : Optional[Any] = in_channels != out_channels or stride != 1
_lowerCAmelCase : Optional[Any] = max(1 , out_channels // config.groups_width )
_lowerCAmelCase : Tuple = (
RegNetShortCut(a__ , a__ , stride=a__ ) if should_apply_shortcut else nn.Identity()
)
_lowerCAmelCase : str = nn.Sequential(
RegNetConvLayer(a__ , a__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(a__ , a__ , stride=a__ , groups=a__ , activation=config.hidden_act ) , RegNetConvLayer(a__ , a__ , kernel_size=1 , activation=a__ ) , )
_lowerCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def __A ( self , a__ ):
_lowerCAmelCase : Any = hidden_state
_lowerCAmelCase : Any = self.layer(a__ )
_lowerCAmelCase : str = self.shortcut(a__ )
hidden_state += residual
_lowerCAmelCase : int = self.activation(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ , a__ = 1 ):
super().__init__()
_lowerCAmelCase : Dict = in_channels != out_channels or stride != 1
_lowerCAmelCase : List[Any] = max(1 , out_channels // config.groups_width )
_lowerCAmelCase : List[Any] = (
RegNetShortCut(a__ , a__ , stride=a__ ) if should_apply_shortcut else nn.Identity()
)
_lowerCAmelCase : Tuple = nn.Sequential(
RegNetConvLayer(a__ , a__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(a__ , a__ , stride=a__ , groups=a__ , activation=config.hidden_act ) , RegNetSELayer(a__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(a__ , a__ , kernel_size=1 , activation=a__ ) , )
_lowerCAmelCase : Dict = ACTaFN[config.hidden_act]
def __A ( self , a__ ):
_lowerCAmelCase : List[str] = hidden_state
_lowerCAmelCase : Optional[int] = self.layer(a__ )
_lowerCAmelCase : List[Any] = self.shortcut(a__ )
hidden_state += residual
_lowerCAmelCase : List[str] = self.activation(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ , a__ , a__ , a__ = 2 , a__ = 2 , ):
super().__init__()
_lowerCAmelCase : List[Any] = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
_lowerCAmelCase : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
a__ , a__ , a__ , stride=a__ , ) , *[layer(a__ , a__ , a__ ) for _ in range(depth - 1 )] , )
def __A ( self , a__ ):
_lowerCAmelCase : List[Any] = self.layers(a__ )
return hidden_state
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Optional[int] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
a__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_lowerCAmelCase : List[str] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(a__ , config.depths[1:] ):
self.stages.append(RegNetStage(a__ , a__ , a__ , depth=a__ ) )
def __A ( self , a__ , a__ = False , a__ = True ):
_lowerCAmelCase : Any = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowerCAmelCase : Union[str, Any] = hidden_states + (hidden_state,)
_lowerCAmelCase : Union[str, Any] = stage_module(a__ )
if output_hidden_states:
_lowerCAmelCase : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=a__ , hidden_states=a__ )
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = RegNetConfig
_UpperCamelCase : int = "regnet"
_UpperCamelCase : List[str] = "pixel_values"
_UpperCamelCase : Union[str, Any] = True
def __A ( self , a__ ):
if isinstance(a__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(a__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __A ( self , a__ , a__=False ):
if isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = value
_a : List[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
_a : List[Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[Any] = config
_lowerCAmelCase : Any = RegNetEmbeddings(a__ )
_lowerCAmelCase : List[str] = RegNetEncoder(a__ )
_lowerCAmelCase : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __A ( self , a__ , a__ = None , a__ = None ):
_lowerCAmelCase : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : List[Any] = self.embedder(a__ )
_lowerCAmelCase : Optional[Any] = self.encoder(
a__ , output_hidden_states=a__ , return_dict=a__ )
_lowerCAmelCase : str = encoder_outputs[0]
_lowerCAmelCase : Optional[Any] = self.pooler(a__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a__ , pooler_output=a__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , SCREAMING_SNAKE_CASE_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config.num_labels
_lowerCAmelCase : Optional[int] = RegNetModel(a__ )
# classification head
_lowerCAmelCase : Optional[Any] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __A ( self , a__ = None , a__ = None , a__ = None , a__ = None , ):
_lowerCAmelCase : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : Union[str, Any] = self.regnet(a__ , output_hidden_states=a__ , return_dict=a__ )
_lowerCAmelCase : str = outputs.pooler_output if return_dict else outputs[1]
_lowerCAmelCase : str = self.classifier(a__ )
_lowerCAmelCase : List[str] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_lowerCAmelCase : str = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_lowerCAmelCase : Optional[int] = """single_label_classification"""
else:
_lowerCAmelCase : Optional[int] = """multi_label_classification"""
if self.config.problem_type == "regression":
_lowerCAmelCase : Optional[Any] = MSELoss()
if self.num_labels == 1:
_lowerCAmelCase : int = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_lowerCAmelCase : Dict = loss_fct(a__ , a__ )
elif self.config.problem_type == "single_label_classification":
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_lowerCAmelCase : Dict = BCEWithLogitsLoss()
_lowerCAmelCase : Tuple = loss_fct(a__ , a__ )
if not return_dict:
_lowerCAmelCase : Optional[int] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=a__ , logits=a__ , hidden_states=outputs.hidden_states )
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase="pt" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = {'add_prefix_space': True} if isinstance(_lowercase , _lowercase ) and not line.startswith(' ' ) else {}
__UpperCamelCase = padding_side
return tokenizer(
[line] , max_length=_lowercase , padding='max_length' if pad_to_max_length else None , truncation=_lowercase , return_tensors=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
def _A ( _lowercase , _lowercase , _lowercase=None , ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = input_ids.ne(_lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
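# Hedged worked example of `trim_batch` above: columns that are padding in
# *every* row are dropped, so with pad_token_id = 0
#     [[5, 7, 0, 0],
#      [6, 0, 0, 0]]
# keeps only the first two columns -> [[5, 7], [6, 0]].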
class __lowerCamelCase (_a ):
def __init__( self: List[str],A_: str,A_: List[str],A_: List[str],A_: List[str],A_: Tuple="train",A_: Any=None,A_: List[str]=None,A_: List[Any]=None,A_: int="",):
'''simple docstring'''
super().__init__()
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.source' )
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.target' )
__UpperCamelCase = self.get_char_lens(self.src_file )
__UpperCamelCase = max_source_length
__UpperCamelCase = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
__UpperCamelCase = tokenizer
__UpperCamelCase = prefix
if n_obs is not None:
__UpperCamelCase = self.src_lens[:n_obs]
__UpperCamelCase = src_lang
__UpperCamelCase = tgt_lang
def __len__( self: Optional[Any] ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self: int,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = index + 1 # linecache starts at 1
__UpperCamelCase = self.prefix + linecache.getline(str(self.src_file ),A_ ).rstrip('\n' )
__UpperCamelCase = linecache.getline(str(self.tgt_file ),A_ ).rstrip('\n' )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer,A_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__UpperCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer,A_ ) else self.tokenizer
)
__UpperCamelCase = self.tokenizer.generator if isinstance(self.tokenizer,A_ ) else self.tokenizer
__UpperCamelCase = encode_line(A_,A_,self.max_source_length,'right' )
__UpperCamelCase = encode_line(A_,A_,self.max_target_length,'right' )
__UpperCamelCase = source_inputs['input_ids'].squeeze()
__UpperCamelCase = target_inputs['input_ids'].squeeze()
__UpperCamelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case_ ( A_: List[Any] ):
'''simple docstring'''
return [len(A_ ) for x in Path(A_ ).open().readlines()]
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
__UpperCamelCase = torch.stack([x['input_ids'] for x in batch] )
__UpperCamelCase = torch.stack([x['attention_mask'] for x in batch] )
__UpperCamelCase = torch.stack([x['decoder_input_ids'] for x in batch] )
__UpperCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = trim_batch(A_,A_ )
__UpperCamelCase, __UpperCamelCase = trim_batch(A_,A_,attention_mask=A_ )
__UpperCamelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
__snake_case = getLogger(__name__)
def _A ( _lowercase ) -> Any:
"""simple docstring"""
return list(itertools.chain.from_iterable(_lowercase ) )
def _A ( _lowercase ) -> None:
"""simple docstring"""
__UpperCamelCase = get_git_info()
save_json(_lowercase , os.path.join(_lowercase , 'git_log.json' ) )
def _A ( _lowercase , _lowercase , _lowercase=4 , **_lowercase ) -> List[Any]:
"""simple docstring"""
with open(_lowercase , 'w' ) as f:
json.dump(_lowercase , _lowercase , indent=_lowercase , **_lowercase )
def _A ( _lowercase ) -> Union[str, Any]:
"""simple docstring"""
with open(_lowercase ) as f:
return json.load(_lowercase )
def _A ( ) -> Dict:
"""simple docstring"""
__UpperCamelCase = git.Repo(search_parent_directories=_lowercase )
__UpperCamelCase = {
'repo_id': str(_lowercase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def _A ( _lowercase , _lowercase ) -> List:
"""simple docstring"""
return list(map(_lowercase , _lowercase ) )
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
with open(_lowercase , 'wb' ) as f:
return pickle.dump(_lowercase , _lowercase )
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
def remove_articles(_lowercase ):
return re.sub(r'\b(a|an|the)\b' , ' ' , _lowercase )
def white_space_fix(_lowercase ):
return " ".join(text.split() )
def remove_punc(_lowercase ):
__UpperCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowercase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowercase ) ) ) )
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = normalize_answer(_lowercase ).split()
__UpperCamelCase = normalize_answer(_lowercase ).split()
__UpperCamelCase = Counter(_lowercase ) & Counter(_lowercase )
__UpperCamelCase = sum(common.values() )
if num_same == 0:
return 0
__UpperCamelCase = 1.0 * num_same / len(_lowercase )
__UpperCamelCase = 1.0 * num_same / len(_lowercase )
__UpperCamelCase = (2 * precision * recall) / (precision + recall)
return fa
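# Hedged worked example of the token-level F1 above, following the standard
# SQuAD-style definition: prediction "the cat sat" vs. reference
# "a cat sat down" normalizes to {cat, sat} and {cat, sat, down}; the overlap
# count is 2, so precision = 2/2, recall = 2/3 and
# F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.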
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
return normalize_answer(_lowercase ) == normalize_answer(_lowercase )
def _A ( _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
assert len(_lowercase ) == len(_lowercase )
__UpperCamelCase = 0
for hypo, pred in zip(_lowercase , _lowercase ):
em += exact_match_score(_lowercase , _lowercase )
if len(_lowercase ) > 0:
em /= len(_lowercase )
return {"em": em}
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
return model_prefix.startswith('rag' )
def _A ( _lowercase , _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
__UpperCamelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__UpperCamelCase = 'dropout_rate'
for p in extra_params:
if getattr(_lowercase , _lowercase , _lowercase ):
if not hasattr(_lowercase , _lowercase ) and not hasattr(_lowercase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_lowercase ) )
delattr(_lowercase , _lowercase )
continue
__UpperCamelCase = p if hasattr(_lowercase , _lowercase ) else equivalent_param[p]
setattr(_lowercase , _lowercase , getattr(_lowercase , _lowercase ) )
delattr(_lowercase , _lowercase )
return hparams, config
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
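
# Usage sketch for the helpers above (config values are made up; assumes MASTER_PORT is not
# already set in the environment):
if __name__ == "__main__":
    defaults = {"optim": {"lr": 1e-3, "betas": (0.9, 0.999)}, "seed": 42}
    overrides = {"optim": {"lr": 5e-4}}
    merged = merge_dicts(overrides, defaults)  # deep-merges `overrides` into `defaults`
    assert merged["optim"] == {"lr": 5e-4, "betas": (0.9, 0.999)} and merged["seed"] == 42

    with patch_environment(master_port="29501"):
        assert os.environ["MASTER_PORT"] == "29501"  # upper-cased, set inside the block only
    assert "MASTER_PORT" not in os.environ

    if is_port_in_use(29500):
        print("Port 29500 is taken; pick another --main_process_port.")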
| 45
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
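
# The `_LazyModule` above defers the heavy torch/tf imports until an attribute is first
# accessed. A stripped-down sketch of the same idea via PEP 562; in a real package the
# function below would be the module-level `__getattr__` (illustrative only, not the
# actual `_LazyModule` internals):
import importlib

_demo_import_structure = {"modeling_vit_mae": ["ViTMAEModel"]}
_demo_attr_to_module = {a: mod for mod, attrs in _demo_import_structure.items() for a in attrs}


def _lazy_getattr(name):
    if name in _demo_attr_to_module:
        module = importlib.import_module("." + _demo_attr_to_module[name], __name__)
        return getattr(module, name)  # submodule is imported only on first touch
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")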
| 310
| 0
|
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class lowercase ( unittest.TestCase ):
@require_torch
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCAmelCase = image_classifier(lowercase , candidate_labels=["""a""", """b""", """c"""] )
        # The floating-point scores are so close that we hit floating-point error, so the
        # ordering is not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(lowercase ) , [
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
] , )
lowerCAmelCase = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
] , )
@require_tf
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCAmelCase = image_classifier(lowercase , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(lowercase ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , )
lowerCAmelCase = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
{"""score""": 0.333, """label""": ANY(lowercase )},
],
] , )
@slow
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCAmelCase = image_classifier(lowercase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(lowercase ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
lowerCAmelCase = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCAmelCase = image_classifier(lowercase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(lowercase ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
lowerCAmelCase = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
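
# For intuition on what these tests assert: zero-shot classification embeds one text prompt
# per candidate label and softmaxes the image-text similarity logits, so scores always sum
# to 1 across the candidates. A minimal numpy sketch of that final step (logits made up):
import numpy as np


def _zero_shot_scores(logits):
    e = np.exp(logits - logits.max())
    return e / e.sum()


# Near-identical logits, as with the tiny random model above, give ~uniform 0.333 scores:
# _zero_shot_scores(np.array([0.01, 0.0, -0.01])).round(3) -> [0.337, 0.333, 0.33]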
| 46
|
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
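
# Because fetch_jobs is a generator over one live page, results can be consumed lazily;
# a usage sketch (assumes network access and that Indeed's markup stays stable):
if __name__ == "__main__":
    from itertools import islice

    for title, company in islice(fetch_jobs("Chennai"), 3):
        print(f"{title} @ {company}")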
| 310
| 0
|
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCamelCase : Dict = "\\n Text data.\n Second line of data."
lowerCamelCase : List[str] = "file"
@pytest.fixture(scope='session' )
def _lowerCAmelCase ( _UpperCamelCase : Dict ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
_SCREAMING_SNAKE_CASE =bytes(_UpperCamelCase , 'utf-8' )
with zstd.open(_UpperCamelCase , 'wb' ) as f:
f.write(_UpperCamelCase )
return path
@pytest.fixture
def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir , _UpperCamelCase ) , 'w' ) as f:
f.write(_UpperCamelCase )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
_SCREAMING_SNAKE_CASE =input_paths[compression_format]
_SCREAMING_SNAKE_CASE =tmp_path / 'cache'
_SCREAMING_SNAKE_CASE =DownloadConfig(cache_dir=_UpperCamelCase , extract_compressed_file=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =cached_path(_UpperCamelCase , download_config=_UpperCamelCase )
with open(_UpperCamelCase ) as f:
_SCREAMING_SNAKE_CASE =f.read()
with open(_UpperCamelCase ) as f:
_SCREAMING_SNAKE_CASE =f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def _lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='custom_cache'
_SCREAMING_SNAKE_CASE ='custom_extracted_dir'
_SCREAMING_SNAKE_CASE =tmp_path / 'custom_extracted_path'
if default_extracted:
_SCREAMING_SNAKE_CASE =('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , _UpperCamelCase )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_UpperCamelCase ) )
_SCREAMING_SNAKE_CASE =custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_SCREAMING_SNAKE_CASE =xz_file
_SCREAMING_SNAKE_CASE =(
DownloadConfig(extract_compressed_file=_UpperCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCamelCase )
)
_SCREAMING_SNAKE_CASE =cached_path(_UpperCamelCase , download_config=_UpperCamelCase )
assert Path(_UpperCamelCase ).parent.parts[-2:] == expected
def _lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =str(Path(_UpperCamelCase ).resolve() )
assert cached_path(_UpperCamelCase ) == text_file
# relative path
_SCREAMING_SNAKE_CASE =str(Path(_UpperCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_UpperCamelCase ) == text_file
def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(_UpperCamelCase ):
cached_path(_UpperCamelCase )
# relative path
_SCREAMING_SNAKE_CASE ='./__missing_file__.txt'
with pytest.raises(_UpperCamelCase ):
cached_path(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =get_from_cache(f"tmp://{tmpfs_file}" )
with open(_UpperCamelCase ) as f:
_SCREAMING_SNAKE_CASE =f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , _UpperCamelCase )
def _lowerCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(_UpperCamelCase ):
cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , _UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(_UpperCamelCase ):
http_get('https://huggingface.co' , temp_file=_UpperCamelCase )
with pytest.raises(_UpperCamelCase ):
http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , _UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(_UpperCamelCase ):
ftp_get('ftp://huggingface.co' , temp_file=_UpperCamelCase )
with pytest.raises(_UpperCamelCase ):
ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , _UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(_UpperCamelCase ):
fsspec_get('s3://huggingface.co' , temp_file=_UpperCamelCase )
with pytest.raises(_UpperCamelCase ):
fsspec_head('s3://huggingface.co' )
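
# Usage-side sketch of the offline mode exercised above: with HF_DATASETS_OFFLINE patched
# to True, any remote fetch raises OfflineModeIsEnabled immediately, which callers can
# catch (`local_fallback` is a hypothetical argument, not part of the datasets API):
def fetch_or_fallback(url, local_fallback):
    try:
        return cached_path(url, download_config=DownloadConfig())
    except OfflineModeIsEnabled:
        return local_fallback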
| 47
|
def merge_sort(collection: list) -> list:
    """simple docstring"""

    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
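
# Quick property check (sketch): merge_sort must agree with the built-in sorted() on random
# input; note merge() pops from its inputs, so we sort a copy.
def _property_check(trials: int = 100) -> None:
    import random

    for _ in range(trials):
        data = [random.randint(-100, 100) for _ in range(random.randint(0, 50))]
        assert merge_sort(list(data)) == sorted(data)


if __name__ == "__main__":
    _property_check()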
| 310
| 0
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , ) -> Optional[int]:
lowerCamelCase : int = parent
lowerCamelCase : int = 13
lowerCamelCase : str = 7
lowerCamelCase : Any = True
lowerCamelCase : Optional[int] = True
lowerCamelCase : Dict = True
lowerCamelCase : List[Any] = 99
lowerCamelCase : List[Any] = 32
lowerCamelCase : str = 2
lowerCamelCase : Union[str, Any] = 4
lowerCamelCase : str = 37
lowerCamelCase : Any = "gelu"
lowerCamelCase : Optional[Any] = 0.1
lowerCamelCase : Dict = 0.1
lowerCamelCase : Optional[Any] = 512
lowerCamelCase : Optional[Any] = 16
lowerCamelCase : List[Any] = 2
lowerCamelCase : int = 0.02
lowerCamelCase : Tuple = 3
lowerCamelCase : Optional[int] = 4
lowerCamelCase : Any = None
def _lowercase ( self ) -> List[str]:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Union[str, Any] = None
if self.use_input_mask:
lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : Union[str, Any] = None
lowerCamelCase : Tuple = None
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : Union[str, Any] = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self ) -> Dict:
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
lowerCamelCase : List[Any] = TFEsmModel(config=UpperCamelCase__ )
lowerCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase : List[Any] = model(UpperCamelCase__ )
lowerCamelCase : int = [input_ids, input_mask]
lowerCamelCase : Optional[Any] = model(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> str:
lowerCamelCase : Optional[int] = True
lowerCamelCase : Union[str, Any] = TFEsmModel(config=UpperCamelCase__ )
lowerCamelCase : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
lowerCamelCase : List[str] = model(UpperCamelCase__ )
lowerCamelCase : Tuple = [input_ids, input_mask]
lowerCamelCase : Dict = model(UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ )
# Also check the case where encoder outputs are not passed
lowerCamelCase : Any = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
lowerCamelCase : List[str] = TFEsmForMaskedLM(config=UpperCamelCase__ )
lowerCamelCase : Any = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
lowerCamelCase : List[Any] = self.num_labels
lowerCamelCase : Dict = TFEsmForTokenClassification(config=UpperCamelCase__ )
lowerCamelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase : Dict = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Dict = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase_ : Optional[Any] = (
{
"""feature-extraction""": TFEsmModel,
"""fill-mask""": TFEsmForMaskedLM,
"""text-classification""": TFEsmForSequenceClassification,
"""token-classification""": TFEsmForTokenClassification,
"""zero-shot""": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase_ : Any = False
lowerCamelCase_ : Dict = False
def _lowercase ( self ) -> Any:
lowerCamelCase : Tuple = TFEsmModelTester(self )
lowerCamelCase : int = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def _lowercase ( self ) -> str:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self ) -> List[str]:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@slow
def _lowercase ( self ) -> List[Any]:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Any = TFEsmModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip("Protein models do not support embedding resizing." )
def _lowercase ( self ) -> List[str]:
pass
@unittest.skip("Protein models do not support embedding resizing." )
def _lowercase ( self ) -> Optional[Any]:
pass
def _lowercase ( self ) -> Tuple:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self ) -> Dict:
lowerCamelCase : int = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : Optional[int] = model(UpperCamelCase__ )[0]
lowerCamelCase : Union[str, Any] = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , UpperCamelCase__ )
# compare the actual values for a slice.
lowerCamelCase : List[str] = tf.constant(
[
[
[8.921518, -10.589814, -6.4671307],
[-6.3967156, -13.911377, -1.1211915],
[-7.781247, -13.951557, -3.740592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def _lowercase ( self ) -> str:
lowerCamelCase : Dict = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase : int = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase : Dict = model(UpperCamelCase__ )[0]
# compare the actual values for a slice.
lowerCamelCase : int = tf.constant(
[
[
[0.14443092, 0.54125327, 0.3247739],
[0.30340484, 0.00526676, 0.31077722],
[0.32278043, -0.24987096, 0.3414628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 48
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCamelCase (_a ):
_lowercase = 0
_lowercase = False
_lowercase = 3.0
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: Any ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs(),{} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs(),{'a': 2} )
self.assertDictEqual(MockClass(a=2,b=A_ ).to_kwargs(),{'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2,c=2.2_5 ).to_kwargs(),{'a': 2, 'c': 2.2_5} )
@require_cuda
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = GradScalerKwargs(init_scale=1024,growth_factor=2 )
AcceleratorState._reset_state()
__UpperCamelCase = Accelerator(mixed_precision='fp16',kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__UpperCamelCase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale,1_0_2_4.0 )
self.assertEqual(scaler._growth_factor,2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor,0.5 )
self.assertEqual(scaler._growth_interval,2000 )
self.assertEqual(scaler._enabled,A_ )
@require_multi_gpu
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_,env=os.environ.copy() )
if __name__ == "__main__":
__snake_case = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
__snake_case = Accelerator(kwargs_handlers=[ddp_scaler])
__snake_case = torch.nn.Linear(1_0_0, 2_0_0)
__snake_case = accelerator.prepare(model)
# Check the values changed in kwargs
__snake_case = ''''''
__snake_case = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
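
# The to_kwargs() behavior checked against MockClass above (emit only the fields that
# differ from their defaults) fits in a few lines. A minimal sketch, not the actual
# KwargsHandler source:
from dataclasses import fields


@dataclass
class MiniKwargsHandler:
    def to_kwargs(self):
        default = self.__class__()  # works because every field has a default
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != getattr(default, f.name)
        }


@dataclass
class _Mock(MiniKwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


assert _Mock(a=2, c=2.25).to_kwargs() == {"a": 2, "c": 2.25}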
| 310
| 0
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        tensor = tensor[:sequence_length]
        if len(tensor) == 0:
            continue
        if padding_side == "right":
            out_tensor[i, : len(tensor)] = tensor
        else:
            # left padding: right-align the row
            out_tensor[i, -len(tensor) :] = tensor
    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors would fail here: the labels are not all the same length yet.
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
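
# Small demo of padding_tensor on ragged rows (values made up):
if __name__ == "__main__":
    _ragged = [[1, 2, 3], [4]]
    assert padding_tensor(_ragged, -1, "right", 4) == [[1, 2, 3, -1], [4, -1, -1, -1]]
    assert padding_tensor(_ragged, -1, "left", 4) == [[-1, 1, 2, 3], [-1, -1, -1, 4]]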
| 49
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        '''simple docstring'''
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        '''simple docstring'''
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        '''simple docstring'''
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        '''simple docstring'''
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
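
# End-to-end usage sketch for the processor above (model id, image URL and queries are just
# examples; requires network access and torch installed):
if __name__ == "__main__":
    import requests
    from PIL import Image

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.open(
        requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
    )
    # One nested list of text queries per image; shorter lists are padded internally.
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']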
| 310
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase : Dict = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 50
|
import math
def proth(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
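
# Every Proth number has the form k * 2**n + 1 with k odd and k < 2**n; quick self-check of
# the values produced above (a minimal sketch):
def is_proth_form(value: int) -> bool:
    rest, n = value - 1, 0
    while rest % 2 == 0:
        rest //= 2
        n += 1
    return rest % 2 == 1 and rest < 2**n


if __name__ == "__main__":
    assert all(is_proth_form(proth(i)) for i in range(1, 11))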
| 310
| 0
|
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
snake_case_ : int = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class __snake_case ( unittest.TestCase ):
@classmethod
def lowerCamelCase ( cls : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = TOKEN
HfFolder.save_token(_snake_case)
@classmethod
def lowerCamelCase ( cls : Union[str, Any]):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''')
except HTTPError:
pass
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
UpperCAmelCase_ = FlaxBertModel(_snake_case)
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token)
UpperCAmelCase_ = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""")
UpperCAmelCase_ = flatten_dict(unfreeze(model.params))
UpperCAmelCase_ = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
UpperCAmelCase_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1e-3 , msg=F"""{key} not identical""")
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_snake_case , repo_id='''test-model-flax''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""")
UpperCAmelCase_ = flatten_dict(unfreeze(model.params))
UpperCAmelCase_ = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
UpperCAmelCase_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1e-3 , msg=F"""{key} not identical""")
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
UpperCAmelCase_ = FlaxBertModel(_snake_case)
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token)
UpperCAmelCase_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''')
UpperCAmelCase_ = flatten_dict(unfreeze(model.params))
UpperCAmelCase_ = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
UpperCAmelCase_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1e-3 , msg=F"""{key} not identical""")
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''')
UpperCAmelCase_ = flatten_dict(unfreeze(model.params))
UpperCAmelCase_ = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
UpperCAmelCase_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1e-3 , msg=F"""{key} not identical""")
def check_models_equal(model_1, model_2):
    """simple docstring"""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
UpperCAmelCase_ = FlaxBertModel(_snake_case)
UpperCAmelCase_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_snake_case , _snake_case))
with self.assertRaises(_snake_case):
UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case)
UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case)
self.assertTrue(check_models_equal(_snake_case , _snake_case))
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
UpperCAmelCase_ = FlaxBertModel(_snake_case)
UpperCAmelCase_ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_snake_case , _snake_case) , max_shard_size='''10KB''')
with self.assertRaises(_snake_case):
UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case)
UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case)
self.assertTrue(check_models_equal(_snake_case , _snake_case))
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''bert'''
UpperCAmelCase_ = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_snake_case):
UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case)
UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case)
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = '''bert'''
UpperCAmelCase_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_snake_case):
UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case)
UpperCAmelCase_ = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case)
self.assertIsNotNone(_snake_case)
| 51
|
import torch
from transformers import AutoModel
class __lowerCamelCase (torch.nn.Module ):
def __init__( self: Union[str, Any],A_: Tuple="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(A_,self ).__init__()
__UpperCamelCase = AutoModel.from_pretrained(A_,return_dict=A_ )
__UpperCamelCase = torch.nn.CosineSimilarity(3,1E-08 )
__UpperCamelCase = torch.nn.Softmax(dim=1 )
def snake_case_ ( self: Tuple,**A_: Union[str, Any] ):
'''simple docstring'''
return self.bert(**A_ ).last_hidden_state
def snake_case_ ( self: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2,keepdim=A_ )
def snake_case_ ( self: List[str],A_: Dict,A_: Union[str, Any],A_: Union[str, Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(A_,A_ ) )
def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = W_supports['sizes'].tolist()
__UpperCamelCase = W_supports['start_token_id'].item()
__UpperCamelCase = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = W_supports['input_ids'] == start_token_id
__UpperCamelCase = W_supports['input_ids'] == end_token_id
for i, size in enumerate(A_ ):
if i == 0:
__UpperCamelCase = 0
else:
__UpperCamelCase = support_sizes[i - 1]
__UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]]
__UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]]
__UpperCamelCase = torch.matmul(q[i],s_start.T ).sum(1 ).softmax(0 )
__UpperCamelCase = torch.matmul(q[i],s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__UpperCamelCase = torch.vstack((p_starts, p_start) )
__UpperCamelCase = torch.vstack((p_ends, p_end) )
else:
__UpperCamelCase = p_start
__UpperCamelCase = p_end
return p_starts, p_ends
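
# The scoring head above boils down to a temperature-scaled softmax over cosine
# similarities. A self-contained sketch of that step (toy tensors; T is the temperature):
def _similarity_scores(query, supports, T=1.0):
    cos = torch.nn.functional.cosine_similarity(query.unsqueeze(0), supports, dim=-1)
    return torch.softmax(T * cos, dim=0)  # probabilities over the support set


if __name__ == "__main__":
    print(_similarity_scores(torch.randn(768), torch.randn(5, 768)))  # 5 probs summing to 1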
| 310
| 0
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
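
# Vectorized equivalent (a sketch): the same batch update as run_gradient_descent, written
# with numpy matrix ops instead of the per-parameter Python loops above.
def run_gradient_descent_vectorized():
    x = numpy.hstack(
        [numpy.ones((m, 1)), numpy.array([inp for inp, _ in train_data], dtype=float)]
    )  # (m, 4): bias column plus the three features
    y = numpy.array([out for _, out in train_data], dtype=float)
    theta = numpy.array([2, 4, 1, 5], dtype=float)
    while True:
        grad = x.T @ (x @ theta - y) / m  # same cost derivative, all parameters at once
        new_theta = theta - LEARNING_RATE * grad
        if numpy.allclose(theta, new_theta, atol=0.000002, rtol=0):
            return new_theta
        theta = new_theta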
| 52
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = BioGptTokenizer
_lowercase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file,'w' ) as fp:
fp.write(json.dumps(A_ ) )
with open(self.merges_file,'w' ) as fp:
fp.write('\n'.join(A_ ) )
def snake_case_ ( self: Optional[int],A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'lower newer'
__UpperCamelCase = 'lower newer'
return input_text, output_text
def snake_case_ ( self: Any ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer(self.vocab_file,self.merges_file )
__UpperCamelCase = 'lower'
__UpperCamelCase = ['low', 'er</w>']
__UpperCamelCase = tokenizer.tokenize(A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokens + ['<unk>']
__UpperCamelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ),A_ )
@slow
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__UpperCamelCase = tokenizer.encode('sequence builders',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.encode('multi-sequence build',add_special_tokens=A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_,A_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
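
# The toy vocab/merges above encode a miniature BPE: 'lower' starts as characters and the
# ranked merges `l o`, `lo w`, `e r</w>` combine them into ['low', 'er</w>']. A minimal
# merge loop illustrating the idea (simplified one-pass-per-merge; not BioGPT's actual
# tokenizer code):
def _toy_bpe(word, ranked_merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]  # mark the end of the word
    for a, b in ranked_merges:  # apply merges in priority order
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols


if __name__ == "__main__":
    print(_toy_bpe("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")]))  # ['low', 'er</w>']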
| 310
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a__ : str =logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str =["input_features", "attention_mask"]
def __init__( self : Union[str, Any] , __A : Optional[int]=8_0 , __A : Tuple=1_6_0_0_0 , __A : Optional[Any]=8_0 , __A : Any=0.0 , __A : Any=True , __A : List[str]=True , __A : str=True , **__A : List[Any] , ):
super().__init__(feature_size=__A , sampling_rate=__A , padding_value=__A , **__A )
__UpperCamelCase = num_mel_bins
__UpperCamelCase = do_ceptral_normalize
__UpperCamelCase = normalize_means
__UpperCamelCase = normalize_vars
__UpperCamelCase = True
def _lowerCamelCase ( self : Union[str, Any] , __A : np.ndarray , ):
__UpperCamelCase = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
__UpperCamelCase = torch.from_numpy(__A ).unsqueeze(0 )
__UpperCamelCase = ta_kaldi.fbank(__A , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def _lowerCamelCase ( __A : np.ndarray , __A : int , __A : Optional[bool] = True , __A : Optional[bool] = True , __A : float = 0.0 , ):
# make sure we normalize float32 arrays
if normalize_means:
__UpperCamelCase = x[:input_length].mean(axis=0 )
__UpperCamelCase = np.subtract(__A , __A )
if normalize_vars:
__UpperCamelCase = x[:input_length].std(axis=0 )
__UpperCamelCase = np.divide(__A , __A )
if input_length < x.shape[0]:
__UpperCamelCase = padding_value
# make sure array is in float32
__UpperCamelCase = x.astype(np.floataa )
return x
def _lowerCamelCase ( self : int , __A : List[np.ndarray] , __A : Optional[np.ndarray] = None ):
__UpperCamelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(__A , __A , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(__A , __A )
]
def __call__( self : List[Any] , __A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __A : Union[bool, str, PaddingStrategy] = False , __A : Optional[int] = None , __A : bool = False , __A : Optional[int] = None , __A : Optional[Union[str, TensorType]] = None , __A : Optional[int] = None , __A : Optional[bool] = None , **__A : Dict , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__UpperCamelCase = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__UpperCamelCase = is_batched_numpy or (
isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__A , np.ndarray ):
__UpperCamelCase = np.asarray(__A , dtype=np.floataa )
elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__UpperCamelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__UpperCamelCase = [raw_speech]
# extract fbank features
__UpperCamelCase = [self._extract_fbank_features(__A ) for waveform in raw_speech]
# convert into correct format for padding
__UpperCamelCase = BatchFeature({'input_features': features} )
__UpperCamelCase = self.pad(
__A , padding=__A , max_length=__A , truncation=__A , pad_to_multiple_of=__A , return_attention_mask=__A , **__A , )
# make sure list is in array format
__UpperCamelCase = padded_inputs.get('input_features' )
if isinstance(input_features[0] , __A ):
__UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for feature in input_features]
__UpperCamelCase = padded_inputs.get('attention_mask' )
if attention_mask is not None:
__UpperCamelCase = [np.asarray(__A , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
__UpperCamelCase = (
np.array(__A , dtype=np.intaa )
if self._get_padding_strategies(__A , max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__UpperCamelCase = self.normalize(
padded_inputs['input_features'] , attention_mask=__A )
if return_tensors is not None:
__UpperCamelCase = padded_inputs.convert_to_tensors(__A )
return padded_inputs
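
# Utterance-level CMVN as in utterance_cmvn above just standardizes each feature matrix
# over its valid frames; a standalone numpy demo (random features stand in for fbank output):
if __name__ == "__main__":
    feats = np.random.randn(120, 80).astype(np.float32) * 3.0 + 1.5  # (frames, mel bins)
    normed = (feats - feats.mean(axis=0)) / feats.std(axis=0)
    assert np.allclose(normed.mean(axis=0), 0.0, atol=1e-4)  # per-bin mean ~ 0
    assert np.allclose(normed.std(axis=0), 1.0, atol=1e-3)   # per-bin variance ~ 1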
| 53
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
    retrieval_batch_size (`int`, *optional*, defaults to 8):
        Retrieval batch size, defined as the number of queries issued concurrently to the faiss index
        encapsulated by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
    dataset_split (`str`, *optional*, defaults to `"train"`):
        Which split of the `dataset` to load.
    index_name (`str`, *optional*, defaults to `"compressed"`):
        The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
        `"compressed"`.
    index_path (`str`, *optional*):
        The path to the serialized faiss index on disk.
    passages_path (`str`, *optional*):
        A path to text passages compatible with the faiss index. Required if using
        [`~models.rag.retrieval_rag.LegacyIndex`]
    use_dummy_dataset (`bool`, *optional*, defaults to `False`):
        Whether to load a "dummy" variant of the dataset specified by `dataset`.
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
    output_retrieved (`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_a )
class __lowerCamelCase (_a ):
_lowercase = """rag"""
_lowercase = True
def __init__( self: Tuple,A_: Any=None,A_: Any=True,A_: List[Any]=None,A_: Optional[int]=None,A_: List[Any]=None,A_: str=None,A_: Union[str, Any]=None,A_: List[Any]=" / ",A_: Union[str, Any]=" // ",A_: List[Any]=5,A_: Optional[int]=300,A_: Tuple=768,A_: Tuple=8,A_: Optional[Any]="wiki_dpr",A_: int="train",A_: Union[str, Any]="compressed",A_: Optional[int]=None,A_: List[Any]=None,A_: List[str]=False,A_: List[str]=False,A_: str=0.0,A_: List[Any]=True,A_: Tuple=False,A_: int=False,A_: Dict=False,A_: Tuple=True,A_: int=None,**A_: Optional[int],):
'''simple docstring'''
super().__init__(
bos_token_id=A_,pad_token_id=A_,eos_token_id=A_,decoder_start_token_id=A_,forced_eos_token_id=A_,is_encoder_decoder=A_,prefix=A_,vocab_size=A_,**A_,)
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__UpperCamelCase = kwargs.pop('question_encoder' )
__UpperCamelCase = question_encoder_config.pop('model_type' )
__UpperCamelCase = kwargs.pop('generator' )
__UpperCamelCase = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = reduce_loss
__UpperCamelCase = label_smoothing
__UpperCamelCase = exclude_bos_score
__UpperCamelCase = do_marginalize
__UpperCamelCase = title_sep
__UpperCamelCase = doc_sep
__UpperCamelCase = n_docs
__UpperCamelCase = max_combined_length
__UpperCamelCase = dataset
__UpperCamelCase = dataset_split
__UpperCamelCase = index_name
__UpperCamelCase = retrieval_vector_size
__UpperCamelCase = retrieval_batch_size
__UpperCamelCase = passages_path
__UpperCamelCase = index_path
__UpperCamelCase = use_dummy_dataset
__UpperCamelCase = output_retrieved
__UpperCamelCase = do_deduplication
__UpperCamelCase = use_cache
if self.forced_eos_token_id is None:
__UpperCamelCase = getattr(self.generator,'forced_eos_token_id',A_ )
@classmethod
def snake_case_ ( cls: Any,A_: PretrainedConfig,A_: PretrainedConfig,**A_: int ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(),generator=generator_config.to_dict(),**A_ )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.question_encoder.to_dict()
__UpperCamelCase = self.generator.to_dict()
__UpperCamelCase = self.__class__.model_type
return output
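# A minimal composition sketch (assumptions: in the upstream library the classmethod defined
# above is exposed as `from_question_encoder_generator_configs`; this is not part of the
# original file):
#
#     from transformers import AutoConfig, RagConfig
#     rag_cfg = RagConfig.from_question_encoder_generator_configs(
#         AutoConfig.for_model("bert"), AutoConfig.for_model("bart"),
#         n_docs=5, index_name="compressed",
#     )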
| 310
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Dict = BioGptTokenizer
snake_case__ : List[str] = False
def UpperCAmelCase_ ( self : Dict ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__SCREAMING_SNAKE_CASE = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
__SCREAMING_SNAKE_CASE = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(UpperCAmelCase__ ) )
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : str ) -> Dict:
__SCREAMING_SNAKE_CASE = "lower newer"
__SCREAMING_SNAKE_CASE = "lower newer"
return input_text, output_text
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = BioGptTokenizer(self.vocab_file , self.merges_file )
__SCREAMING_SNAKE_CASE = "lower"
__SCREAMING_SNAKE_CASE = ["low", "er</w>"]
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokens + ["<unk>"]
__SCREAMING_SNAKE_CASE = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
__SCREAMING_SNAKE_CASE = tokenizer.encode("sequence builders" , add_special_tokens=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
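# Recap of the fixture above: with the toy vocab and merges, "lower" is BPE-split into
# ["low", "er</w>"] (ids [14, 15]), and any out-of-vocabulary token maps to "<unk>" (id 20).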
| 54
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __lowerCamelCase (_a ):
_lowercase = """M-CLIP"""
def __init__( self: int,A_: Any=1024,A_: Union[str, Any]=768,**A_: str ):
'''simple docstring'''
__UpperCamelCase = transformerDimSize
__UpperCamelCase = imageDimSize
super().__init__(**A_ )
class __lowerCamelCase (_a ):
_lowercase = MCLIPConfig
def __init__( self: int,A_: Optional[Any],*A_: List[str],**A_: Union[str, Any] ):
'''simple docstring'''
super().__init__(A_,*A_,**A_ )
__UpperCamelCase = XLMRobertaModel(A_ )
__UpperCamelCase = torch.nn.Linear(
in_features=config.transformerDimensions,out_features=config.numDims )
def snake_case_ ( self: Dict,A_: int,A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = self.transformer(input_ids=A_,attention_mask=A_ )[0]
__UpperCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(A_ ), embs
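# Note on the forward pass above: (embs * mask).sum(dim=1) / mask.sum(dim=1) is masked
# mean pooling, so padded positions contribute nothing before the linear layer projects
# the pooled text embedding into the shared image-embedding space.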
| 310
| 0
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case :
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=10 , UpperCamelCase=3 , UpperCamelCase=2 , UpperCamelCase=2 , UpperCamelCase=2 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=10 , UpperCamelCase=0.02 , UpperCamelCase=0.9 , UpperCamelCase=None , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = patch_size
lowerCamelCase_ = tubelet_size
lowerCamelCase_ = num_frames
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = mask_ratio
lowerCamelCase_ = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowerCamelCase_ = (image_size // patch_size) ** 2
lowerCamelCase_ = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowerCamelCase_ = int(mask_ratio * self.seq_length )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCamelCase_ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEForPreTraining(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase_ = torch.ones((self.num_masks,) )
lowerCamelCase_ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCamelCase_ = mask.expand(self.batch_size , -1 ).bool()
lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase )
# model only returns predictions for masked patches
lowerCamelCase_ = mask.sum().item()
lowerCamelCase_ = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_lowerCamelCase = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
lowerCamelCase_ = copy.deepcopy(UpperCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase_ = torch.ones((self.model_tester.num_masks,) )
lowerCamelCase_ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCamelCase_ = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowerCamelCase_ = bool_masked_pos.to(UpperCamelCase )
if return_labels:
if model_class in [
*get_values(UpperCamelCase ),
]:
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase )
return inputs_dict
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = VideoMAEModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = True
for model_class in self.all_model_classes:
lowerCamelCase_ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase_ = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase_ = len(UpperCamelCase )
# Check attention is always last and order is fine
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
lowerCamelCase_ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase_ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case ( self ):
"""simple docstring"""
pass
def __snake_case ( ):
lowerCamelCase_ = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
lowerCamelCase_ = np.load(UpperCAmelCase_ )
return list(UpperCAmelCase_ )
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
"""simple docstring"""
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
UpperCamelCase )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_video()
lowerCamelCase_ = image_processor(UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**UpperCamelCase )
# verify the logits
lowerCamelCase_ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(UpperCamelCase )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_video()
lowerCamelCase_ = image_processor(UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase )
# add boolean mask, indicating which patches to mask
lowerCamelCase_ = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
lowerCamelCase_ = torch.load(UpperCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**UpperCamelCase )
# verify the logits
lowerCamelCase_ = torch.Size([1, 1408, 1536] )
lowerCamelCase_ = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=UpperCamelCase )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowerCamelCase_ = torch.tensor([0.5_142] , device=UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCamelCase_ = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=UpperCamelCase ).to(
UpperCamelCase )
with torch.no_grad():
lowerCamelCase_ = model(**UpperCamelCase )
        lowerCamelCase_ = torch.tensor([0.6_469] , device=UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase , atol=1e-4 ) )
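# The two expected loss values differ because `norm_pix_loss` toggles per-patch
# normalization (mean/std) of the pixel targets before the reconstruction loss is computed.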
| 55
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __lowerCamelCase :
_lowercase = XGLMConfig
_lowercase = {}
_lowercase = """gelu"""
def __init__( self: Optional[int],A_: Dict,A_: Any=14,A_: Optional[int]=7,A_: str=True,A_: Any=True,A_: Optional[int]=True,A_: Optional[int]=99,A_: List[str]=32,A_: Any=2,A_: Tuple=4,A_: List[str]=37,A_: Dict="gelu",A_: int=0.1,A_: List[str]=0.1,A_: int=512,A_: List[Any]=0.0_2,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = ffn_dim
__UpperCamelCase = activation_function
__UpperCamelCase = activation_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = 2
__UpperCamelCase = 1
def snake_case_ ( self: Dict ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = self.get_config()
__UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=A_,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=A_,)
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
        (
            __UpperCamelCase,
            __UpperCamelCase,
            __UpperCamelCase,
            __UpperCamelCase,
        ) = config_and_inputs
__UpperCamelCase = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class __lowerCamelCase (_a , _a , unittest.TestCase ):
_lowercase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_lowercase = (TFXGLMForCausalLM,) if is_tf_available() else ()
_lowercase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,n_embd=37 )
def snake_case_ ( self: Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def snake_case_ ( self: Any ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __lowerCamelCase (unittest.TestCase ):
@slow
def snake_case_ ( self: Optional[Any],A_: int=True ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = tf.convert_to_tensor([[2, 268, 9865]],dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase = model.generate(A_,do_sample=A_,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(),A_ )
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase = tokenizer('Today is a nice day and',return_tensors='tf' )
__UpperCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase = model.generate(A_,do_sample=A_,seed=[7, 0] )
__UpperCamelCase = tokenizer.decode(output_ids[0],skip_special_tokens=A_ )
__UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(A_,A_ )
@slow
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = 'left'
# use different length sentences to test batching
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase = tokenizer(A_,return_tensors='tf',padding=A_ )
__UpperCamelCase = inputs['input_ids']
__UpperCamelCase = model.generate(input_ids=A_,attention_mask=inputs['attention_mask'],max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[0],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[1],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_non_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(A_,A_ )
self.assertListEqual(A_,[non_padded_sentence, padded_sentence] )
| 310
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
a : Optional[Any] = logging.get_logger(__name__)
a : Tuple = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class a ( _lowerCamelCase ):
snake_case_ = "layoutlmv3"
def __init__( self : Optional[Any] , lowercase_ : Optional[Any]=5_0265 , lowercase_ : Optional[Any]=768 , lowercase_ : int=12 , lowercase_ : Optional[Any]=12 , lowercase_ : Dict=3072 , lowercase_ : str="gelu" , lowercase_ : Tuple=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=512 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=1e-5 , lowercase_ : List[str]=1 , lowercase_ : Tuple=0 , lowercase_ : List[str]=2 , lowercase_ : Union[str, Any]=1024 , lowercase_ : Union[str, Any]=128 , lowercase_ : Optional[int]=128 , lowercase_ : Optional[Any]=True , lowercase_ : Optional[int]=32 , lowercase_ : Tuple=128 , lowercase_ : Dict=64 , lowercase_ : Tuple=256 , lowercase_ : Optional[int]=True , lowercase_ : List[str]=True , lowercase_ : str=True , lowercase_ : Any=224 , lowercase_ : List[Any]=3 , lowercase_ : Optional[Any]=16 , lowercase_ : int=None , **lowercase_ : Optional[Any] , ):
super().__init__(
vocab_size=lowercase_ , hidden_size=lowercase_ , num_hidden_layers=lowercase_ , num_attention_heads=lowercase_ , intermediate_size=lowercase_ , hidden_act=lowercase_ , hidden_dropout_prob=lowercase_ , attention_probs_dropout_prob=lowercase_ , max_position_embeddings=lowercase_ , type_vocab_size=lowercase_ , initializer_range=lowercase_ , layer_norm_eps=lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ , )
snake_case_ = max_ad_position_embeddings
snake_case_ = coordinate_size
snake_case_ = shape_size
snake_case_ = has_relative_attention_bias
snake_case_ = rel_pos_bins
snake_case_ = max_rel_pos
snake_case_ = has_spatial_attention_bias
snake_case_ = rel_ad_pos_bins
snake_case_ = max_rel_ad_pos
snake_case_ = text_embed
snake_case_ = visual_embed
snake_case_ = input_size
snake_case_ = num_channels
snake_case_ = patch_size
snake_case_ = classifier_dropout
class a ( _lowerCamelCase ):
snake_case_ = version.parse("1.12" )
@property
def A_ ( self : List[Any] ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def A_ ( self : Dict ):
return 1e-5
@property
def A_ ( self : int ):
return 12
def A_ ( self : List[Any] , lowercase_ : "ProcessorMixin" , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 3 , lowercase_ : int = 40 , lowercase_ : int = 40 , ):
setattr(processor.image_processor , '''apply_ocr''' , lowercase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case_ = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case_ = processor.tokenizer.num_special_tokens_to_add(lowercase_ )
snake_case_ = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
# Generate dummy inputs according to compute batch and sequence
snake_case_ = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
snake_case_ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
snake_case_ = self._generate_dummy_images(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
snake_case_ = dict(
processor(
lowercase_ , text=lowercase_ , boxes=lowercase_ , return_tensors=lowercase_ , ) )
return inputs
| 56
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__snake_case = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case = json.load(f)
@require_torch
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int,A_: int ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(A_ )
def snake_case_ ( self: Dict,A_: int ):
'''simple docstring'''
__UpperCamelCase = FSMTForConditionalGeneration.from_pretrained(A_ ).to(A_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 2_6.0],
['ru-en', 2_2.0],
['en-de', 2_2.0],
['de-en', 2_9.0],
] )
@slow
def snake_case_ ( self: Tuple,A_: Any,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = F'''facebook/wmt19-{pair}'''
__UpperCamelCase = self.get_tokenizer(A_ )
__UpperCamelCase = self.get_model(A_ )
__UpperCamelCase = bleu_data[pair]['src']
__UpperCamelCase = bleu_data[pair]['tgt']
__UpperCamelCase = tokenizer(A_,return_tensors='pt',truncation=A_,padding='longest' ).to(A_ )
__UpperCamelCase = model.generate(
input_ids=batch.input_ids,num_beams=8,)
__UpperCamelCase = tokenizer.batch_decode(
A_,skip_special_tokens=A_,clean_up_tokenization_spaces=A_ )
__UpperCamelCase = calculate_bleu(A_,A_ )
print(A_ )
self.assertGreaterEqual(scores['bleu'],A_ )
| 310
| 0
|
"""simple docstring"""
from manim import *
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = Rectangle(height=0.5 , width=0.5 )
__lowerCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__lowerCAmelCase = [mem.copy() for i in range(6 )]
__lowerCAmelCase = [mem.copy() for i in range(6 )]
__lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__lowerCAmelCase = VGroup(__a , __a ).arrange(__a , buff=0 )
__lowerCAmelCase = Text("CPU" , font_size=24 )
__lowerCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
__lowerCAmelCase = [mem.copy() for i in range(4 )]
__lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__lowerCAmelCase = Text("GPU" , font_size=24 )
__lowerCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
__lowerCAmelCase = [mem.copy() for i in range(6 )]
__lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__lowerCAmelCase = Text("Model" , font_size=24 )
__lowerCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
__lowerCAmelCase = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__lowerCAmelCase = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
cpu_targs.append(__a )
__lowerCAmelCase = [mem.copy() for i in range(6 )]
__lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__lowerCAmelCase = Text("Loaded Checkpoint" , font_size=24 )
__lowerCAmelCase = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__lowerCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowerCAmelCase = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
__lowerCAmelCase = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__lowerCAmelCase = MarkupText(
f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) , Write(__a ) )
self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
__lowerCAmelCase = []
__lowerCAmelCase = []
for i, rect in enumerate(__a ):
__lowerCAmelCase = fill.copy().set_fill(__a , opacity=0.7 )
target.move_to(__a )
first_animations.append(GrowFromCenter(__a , run_time=1 ) )
__lowerCAmelCase = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
| 57
|
def hexagonal_numbers( length: int ) -> list[int]:
    """Return the first ``length`` hexagonal numbers, where the n-th is n * (2n - 1)."""
    if not isinstance(length , int ) or length <= 0:
        raise ValueError('Length must be a positive integer.' )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
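# Worked example: the first five hexagonal numbers n * (2 * n - 1) for n = 0..4 are
# [0, 1, 6, 15, 28].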
| 310
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowercase_ = logging.get_logger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , *A , **A ) -> None:
warnings.warn(
"""The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PerceiverImageProcessor instead.""" , A , )
super().__init__(*A , **A )
| 58
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = MgpstrTokenizer
_lowercase = False
_lowercase = {}
_lowercase = False
def snake_case_ ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
__UpperCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
def snake_case_ ( self: Dict,**A_: Tuple ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname,**A_ )
def snake_case_ ( self: List[Any],A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'tester'
__UpperCamelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
__UpperCamelCase = tokenizer.encode([special_token],add_special_tokens=A_ )
self.assertEqual(len(A_ ),1 )
__UpperCamelCase = tokenizer.decode(A_,skip_special_tokens=A_ )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase, __UpperCamelCase = self.get_input_output_texts(A_ )
__UpperCamelCase = tokenizer.tokenize(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertNotEqual(len(A_ ),0 )
__UpperCamelCase = tokenizer.decode(A_ )
self.assertIsInstance(A_,A_ )
self.assertEqual(text_a.replace(' ','' ),A_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
pass
| 310
| 0
|
def UpperCamelCase ( bin_string : str ):
    if not all(char in "01" for char in bin_string ):
        raise ValueError("Non-binary value was passed to the function" )
    if not bin_string:
        raise ValueError("Empty string was passed to the function" )
    oct_string : str = ""
    while len(bin_string ) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val : int = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
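# Worked example: "1111" is left-padded to "001111", split into 001 | 111, and each
# 3-bit group becomes one octal digit, giving "17" (0b1111 == 0o17 == 15).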
| 59
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Union[str, Any],A_: List[str] ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = RobertaEmbeddings(A_ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , _a , )
class __lowerCamelCase (_a ):
_lowercase = RobertaConfig
_lowercase = """roberta"""
def __init__( self: Any,A_: int ):
'''simple docstring'''
super().__init__(A_ )
__UpperCamelCase = config.num_labels
__UpperCamelCase = config.num_hidden_layers
__UpperCamelCase = DeeRobertaModel(A_ )
__UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
__UpperCamelCase = nn.Linear(config.hidden_size,self.config.num_labels )
@add_start_docstrings_to_model_forward(A_ )
def snake_case_ ( self: List[str],A_: int=None,A_: List[Any]=None,A_: List[str]=None,A_: List[str]=None,A_: Optional[int]=None,A_: List[str]=None,A_: Any=None,A_: List[Any]=-1,A_: List[Any]=False,):
'''simple docstring'''
__UpperCamelCase = self.num_layers
try:
__UpperCamelCase = self.roberta(
A_,attention_mask=A_,token_type_ids=A_,position_ids=A_,head_mask=A_,inputs_embeds=A_,)
__UpperCamelCase = outputs[1]
__UpperCamelCase = self.dropout(A_ )
__UpperCamelCase = self.classifier(A_ )
__UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__UpperCamelCase = e.message
__UpperCamelCase = e.exit_layer
__UpperCamelCase = outputs[0]
if not self.training:
__UpperCamelCase = entropy(A_ )
__UpperCamelCase = []
__UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) )
# work with highway exits
__UpperCamelCase = []
for highway_exit in outputs[-1]:
__UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(A_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__UpperCamelCase = MSELoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) )
else:
__UpperCamelCase = CrossEntropyLoss()
__UpperCamelCase = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) )
highway_losses.append(A_ )
if train_highway:
__UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__UpperCamelCase = (loss,) + outputs
if not self.training:
__UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
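# How the early exit works above: every intermediate ("highway") layer carries its own
# classifier; at inference a confident highway can raise HighwayException to stop the
# forward pass early, and during training the per-highway losses are summed (the final
# highway is excluded when train_highway is set).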
| 310
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def minimax( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ):
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if not scores:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) ,
            minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) ,
        )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) ,
            minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) ,
        )
    )
def main():
    scores : list[int] = [90, 23, 6, 33, 21, 65, 123, 34423]
    height : float = math.log(len(scores ) , 2 )
    print(f'''Optimal value : {minimax(0 , 0 , True , scores , height )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
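# For the sample scores (root = maximizer, leaves at depth 3) the evaluation is:
#   depth 2 (max): 90, 33, 65, 34423
#   depth 1 (min): min(90, 33) = 33, min(65, 34423) = 65
#   root    (max): max(33, 65) = 65   ->   prints "Optimal value : 65"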
| 60
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
@staticmethod
def snake_case_ ( *A_: Optional[Any],**A_: Tuple ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __lowerCamelCase (unittest.TestCase ):
_lowercase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case_ ( self: Dict,A_: Optional[int],A_: Tuple,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = ObjectDetectionPipeline(model=A_,image_processor=A_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case_ ( self: int,A_: Any,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 )
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
import datasets
__UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' )
__UpperCamelCase = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__UpperCamelCase = object_detector(A_,threshold=0.0 )
self.assertEqual(len(A_ ),len(A_ ) )
for outputs in batch_outputs:
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
@require_torch
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=0.0 )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
],threshold=0.0,)
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
],)
@require_torch
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ )
__UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
__UpperCamelCase = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
],)
@require_torch
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 0.9_9_8_5
__UpperCamelCase = 'facebook/detr-resnet-50'
__UpperCamelCase = pipeline('object-detection',model=A_ )
__UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=A_ )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],)
@require_torch
@require_pytesseract
@slow
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = 'Narsil/layoutlmv3-finetuned-funsd'
__UpperCamelCase = 0.9_9_9_3
__UpperCamelCase = pipeline('object-detection',model=A_,threshold=A_ )
__UpperCamelCase = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(A_,decimals=4 ),[
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
],)
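# A quick note on the threshold semantics exercised in the tests above (a
# sketch, not part of the suite): `threshold` filters detections by score, so
# raising it from 0.0 to ~0.9985 keeps only the two high-confidence "cat"
# boxes in the COCO sample image, as the slow test with threshold=0.9985 shows.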
| 310
| 0
|
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 61
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class __lowerCamelCase (_a ):
_lowercase = """xlm-roberta"""
def __init__( self: Union[str, Any],A_: Union[str, Any]=3_0522,A_: Dict=768,A_: Union[str, Any]=12,A_: Any=12,A_: str=3072,A_: Union[str, Any]="gelu",A_: str=0.1,A_: Optional[int]=0.1,A_: List[Any]=512,A_: Optional[Any]=2,A_: Dict=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=1,A_: str=0,A_: str=2,A_: Optional[Any]="absolute",A_: Union[str, Any]=True,A_: int=None,**A_: Optional[Any],):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = position_embedding_type
__UpperCamelCase = use_cache
__UpperCamelCase = classifier_dropout
class __lowerCamelCase (_a ):
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
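# A minimal usage sketch (assumes the `transformers` package, where these
# classes are published as `XLMRobertaConfig` / `XLMRobertaOnnxConfig`; this
# file shows them under placeholder names):
#
# from transformers import XLMRobertaConfig
# config = XLMRobertaConfig()          # defaults mirror the __init__ above
# assert config.num_hidden_layers == 12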
| 310
| 0
|
import sys
from collections import defaultdict
class Heap:
    """simple docstring"""

    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                # Swap the heap values and the tracked vertex positions
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
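# Example session for the prompt-driven driver above (vertices are assumed to
# be 0-indexed and each edge line is "u v weight"). For a triangle graph:
#   Enter number of edges: 3
#   0 1 1
#   1 2 2
#   0 2 3
# the printed spanning tree edges are [(0, 1), (1, 2)].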
| 62
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase (_a ):
_lowercase = field(default=_a , metadata={"""help""": """Whether to use SortishSampler or not."""} )
_lowercase = field(
default=_a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
_lowercase = field(
default=_a , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(A_,A_ ):
__UpperCamelCase = v.to_dict()
return d
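# A minimal sketch of why `to_dict` converts nested values above (assumes the
# `transformers` package, where this class ships as `Seq2SeqTrainingArguments`):
# any `GenerationConfig` stored on the arguments is flattened to a plain dict
# so the arguments stay JSON-serializable for logging and checkpointing.
#
# from transformers import Seq2SeqTrainingArguments
# args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True)
# assert isinstance(args.to_dict(), dict)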
| 310
| 0
|
'''simple docstring'''
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome_traversal(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome(s: str) -> bool:
    mid = len(s) // 2
    n = len(s)
    # We only need to traverse half of the string, since the i-th character
    # from the end is reachable from the i-th index.
    # e.g. for [0,1,2,3,4,5], index 4 can be reached via index 1
    # (i == n - i - 1), where n is the length of the string.
    return all(s[i] == s[n - i - 1] for i in range(mid))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f'all({name}(key) is value for key, value in test_data.items())'
    setup = f'from __main__ import test_data, {name}'
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f'{name:<35} finished {number:,} runs in {result:.5f} seconds')
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"""{key:21} {value}""")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
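# The measured ranking above (slice < half-scan all() < recursion < index
# traversal) is expected: `s == s[::-1]` runs entirely in C, while the
# recursive variant allocates a new string on every call. Exact timings will
# of course vary by machine.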
| 63
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """simple docstring"""
    if is_torch_version('<', '2.0.0') or not hasattr(torch, '_dynamo'):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """simple docstring"""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, 'forward')
        original_forward = model.__dict__.pop('_original_forward', None)
        if original_forward is not None:
            while hasattr(forward, '__wrapped__'):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, '_converted_to_transformer_engine', False):
        convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    """simple docstring"""
    PartialState().wait_for_everyone()


def save(obj, f):
    """simple docstring"""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """simple docstring"""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """simple docstring"""
    if not hasattr(obj, '__qualname__') and not hasattr(obj, '__name__'):
        obj = getattr(obj, '__class__', obj)
    if hasattr(obj, '__qualname__'):
        return obj.__qualname__
    if hasattr(obj, '__name__'):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """simple docstring"""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """simple docstring"""
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(('localhost', port)) == 0
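# A minimal usage sketch for `patch_environment` above: the variables exist
# only inside the block and are deleted again afterwards.
#
# with patch_environment(master_port="29501"):
#     assert os.environ["MASTER_PORT"] == "29501"
# assert "MASTER_PORT" not in os.environ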
| 310
| 0
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = field(default=__a , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase__ = field(
default=__a , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase__ = field(
default=__a , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase__ = field(
default=__a , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase__ = field(
default=__a , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Tuple = super().to_dict()
for k, v in d.items():
if isinstance(a_, a_ ):
_snake_case : int = v.to_dict()
return d
| 64
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case = logging.getLogger(__name__)
def _A ( _lowercase , _lowercase ) -> Optional[int]:
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class __lowerCamelCase :
_lowercase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowercase = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
_lowercase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
_lowercase = field(metadata={"""help""": """Should contain the data files for the task."""} )
_lowercase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowercase = field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _A ( ) -> str:
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _lowercase )
# Set seed
set_seed(training_args.seed )
try:
__UpperCamelCase = processors[data_args.task_name]()
__UpperCamelCase = processor.get_labels()
__UpperCamelCase = len(_lowercase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(_lowercase ) -> Dict:
__UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(_lowercase , p.label_ids )}
# Data collator
__UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(_lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _lowercase , _lowercase )
writer.write('%s = %s\n' % (key, value) )
results.update(_lowercase )
return results
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
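# Example invocation (illustrative paths and script name; the flags map to the
# dataclasses defined above plus the standard `TrainingArguments`):
#
# python run_multiple_choice.py \
#   --task_name swag --model_name_or_path bert-base-uncased \
#   --data_dir ./data/swag --max_seq_length 128 \
#   --output_dir ./out --do_train --do_eval --overwrite_output_dir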
| 310
| 0
|
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    '''simple docstring'''
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    '''simple docstring'''
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
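# Expected behaviour for the default 4x4 matrix (a quick sanity check):
# make_matrix() -> [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
# rotate_90(..) -> [[4, 8, 12, 16], [3, 7, 11, 15], [2, 6, 10, 14], [1, 5, 9, 13]]
# i.e. the last column of the original becomes the first row of the result.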
| 65
|
import os
def solution() -> int:
    """simple docstring"""
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
        names = names.replace('"', '').split(',')

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
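# Worked example of the scoring rule: "COLIN" has letter value
# 3 + 15 + 12 + 9 + 14 = 53, and at (1-indexed) position 938 in the sorted
# list it contributes 938 * 53 = 49714 to the total.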
| 310
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : Dict = """altclip_text_model"""
def __init__( self: Tuple , snake_case: List[Any]=250_002 , snake_case: Dict=1_024 , snake_case: Tuple=24 , snake_case: int=16 , snake_case: Any=4_096 , snake_case: Optional[int]="gelu" , snake_case: List[Any]=0.1 , snake_case: Union[str, Any]=0.1 , snake_case: List[str]=514 , snake_case: List[Any]=1 , snake_case: Any=0.0_2 , snake_case: Optional[int]=0.0_2 , snake_case: Union[str, Any]=1E-05 , snake_case: List[Any]=1 , snake_case: Tuple=0 , snake_case: int=2 , snake_case: Dict="absolute" , snake_case: Optional[int]=True , snake_case: Tuple=768 , **snake_case: Union[str, Any] , ) -> Union[str, Any]:
super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
snake_case_ :Dict = vocab_size
snake_case_ :str = hidden_size
snake_case_ :List[Any] = num_hidden_layers
snake_case_ :List[Any] = num_attention_heads
snake_case_ :Union[str, Any] = hidden_act
snake_case_ :Tuple = intermediate_size
snake_case_ :List[str] = hidden_dropout_prob
snake_case_ :int = attention_probs_dropout_prob
snake_case_ :int = max_position_embeddings
snake_case_ :Optional[int] = type_vocab_size
snake_case_ :Union[str, Any] = initializer_range
snake_case_ :str = initializer_factor
snake_case_ :List[str] = layer_norm_eps
snake_case_ :Tuple = position_embedding_type
snake_case_ :str = use_cache
snake_case_ :List[str] = project_dim
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : Optional[Any] = """altclip_vision_model"""
def __init__( self: Dict , snake_case: Dict=768 , snake_case: List[Any]=3_072 , snake_case: int=512 , snake_case: List[Any]=12 , snake_case: List[str]=12 , snake_case: Optional[int]=3 , snake_case: int=224 , snake_case: Optional[Any]=32 , snake_case: Optional[int]="quick_gelu" , snake_case: Tuple=1E-5 , snake_case: Tuple=0.0 , snake_case: List[str]=0.0_2 , snake_case: Union[str, Any]=1.0 , **snake_case: str , ) -> Optional[Any]:
super().__init__(**snake_case )
snake_case_ :int = hidden_size
snake_case_ :Optional[int] = intermediate_size
snake_case_ :List[str] = projection_dim
snake_case_ :Any = num_hidden_layers
snake_case_ :Optional[Any] = num_attention_heads
snake_case_ :Union[str, Any] = num_channels
snake_case_ :int = patch_size
snake_case_ :List[Any] = image_size
snake_case_ :int = initializer_range
snake_case_ :Optional[Any] = initializer_factor
snake_case_ :str = attention_dropout
snake_case_ :List[Any] = layer_norm_eps
snake_case_ :List[str] = hidden_act
@classmethod
def lowerCAmelCase_ ( cls: List[str] , snake_case: Union[str, os.PathLike] , **snake_case: List[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(snake_case )
snake_case_, snake_case_ :List[str] = cls.get_config_dict(snake_case , **snake_case )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("""model_type""" ) == "altclip":
snake_case_ :Optional[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case , **snake_case )
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : str = """altclip"""
_A : Any = True
def __init__( self: int , snake_case: Any=None , snake_case: Optional[Any]=None , snake_case: str=768 , snake_case: int=2.6_5_9_2 , **snake_case: Union[str, Any] ) -> str:
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
snake_case_ :str = kwargs.pop("""text_config_dict""" , snake_case )
snake_case_ :List[str] = kwargs.pop("""vision_config_dict""" , snake_case )
super().__init__(**snake_case )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
snake_case_ :Tuple = {}
# This is the complete result when using `text_config_dict`.
snake_case_ :str = AltCLIPTextConfig(**snake_case ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
snake_case_ :List[str] = (
f"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
f"""The value `text_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
snake_case_ :str = (
f"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
f"""value `text_config[\"{key}\"]` will be overriden."""
)
logger.warning(snake_case )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
snake_case_ :Tuple = {}
# This is the complete result when using `vision_config_dict`.
snake_case_ :Dict = AltCLIPVisionConfig(**snake_case ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
snake_case_ :Union[str, Any] = {
str(snake_case ): value for key, value in _vision_config_dict["""id2label"""].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
snake_case_ :Dict = (
f"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
f"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
snake_case_ :Dict = (
f"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
f"""The value `vision_config[\"{key}\"]` will be overriden."""
)
logger.warning(snake_case )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
snake_case_ :Optional[Any] = {}
logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
if vision_config is None:
snake_case_ :List[str] = {}
            logger.info("""`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.""" )
snake_case_ :Tuple = AltCLIPTextConfig(**snake_case )
snake_case_ :str = AltCLIPVisionConfig(**snake_case )
snake_case_ :int = projection_dim
snake_case_ :Union[str, Any] = logit_scale_init_value
snake_case_ :Dict = 1.0
@classmethod
def lowerCAmelCase_ ( cls: str , snake_case: AltCLIPTextConfig , snake_case: AltCLIPVisionConfig , **snake_case: int ) -> Union[str, Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case )
def lowerCAmelCase_ ( self: int ) -> Tuple:
snake_case_ :str = copy.deepcopy(self.__dict__ )
snake_case_ :Any = self.text_config.to_dict()
snake_case_ :Optional[int] = self.vision_config.to_dict()
snake_case_ :List[Any] = self.__class__.model_type
return output
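# A minimal usage sketch (assumes the `transformers` package, where these
# classes ship as `AltCLIPTextConfig`, `AltCLIPVisionConfig` and `AltCLIPConfig`;
# placeholder names are used in this file):
#
# from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig
# config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())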
| 66
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase="pt" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = {'add_prefix_space': True} if isinstance(_lowercase , _lowercase ) and not line.startswith(' ' ) else {}
__UpperCamelCase = padding_side
return tokenizer(
[line] , max_length=_lowercase , padding='max_length' if pad_to_max_length else None , truncation=_lowercase , return_tensors=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
def _A ( _lowercase , _lowercase , _lowercase=None , ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = input_ids.ne(_lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCamelCase (_a ):
def __init__( self: List[str],A_: str,A_: List[str],A_: List[str],A_: List[str],A_: Tuple="train",A_: Any=None,A_: List[str]=None,A_: List[Any]=None,A_: int="",):
'''simple docstring'''
super().__init__()
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.source' )
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.target' )
__UpperCamelCase = self.get_char_lens(self.src_file )
__UpperCamelCase = max_source_length
__UpperCamelCase = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
__UpperCamelCase = tokenizer
__UpperCamelCase = prefix
if n_obs is not None:
__UpperCamelCase = self.src_lens[:n_obs]
__UpperCamelCase = src_lang
__UpperCamelCase = tgt_lang
def __len__( self: Optional[Any] ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self: int,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = index + 1 # linecache starts at 1
__UpperCamelCase = self.prefix + linecache.getline(str(self.src_file ),A_ ).rstrip('\n' )
__UpperCamelCase = linecache.getline(str(self.tgt_file ),A_ ).rstrip('\n' )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer,A_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__UpperCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer,A_ ) else self.tokenizer
)
__UpperCamelCase = self.tokenizer.generator if isinstance(self.tokenizer,A_ ) else self.tokenizer
__UpperCamelCase = encode_line(A_,A_,self.max_source_length,'right' )
__UpperCamelCase = encode_line(A_,A_,self.max_target_length,'right' )
__UpperCamelCase = source_inputs['input_ids'].squeeze()
__UpperCamelCase = target_inputs['input_ids'].squeeze()
__UpperCamelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case_ ( A_: List[Any] ):
'''simple docstring'''
return [len(A_ ) for x in Path(A_ ).open().readlines()]
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
__UpperCamelCase = torch.stack([x['input_ids'] for x in batch] )
__UpperCamelCase = torch.stack([x['attention_mask'] for x in batch] )
__UpperCamelCase = torch.stack([x['decoder_input_ids'] for x in batch] )
__UpperCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = trim_batch(A_,A_ )
__UpperCamelCase, __UpperCamelCase = trim_batch(A_,A_,attention_mask=A_ )
__UpperCamelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
__snake_case = getLogger(__name__)
def _A ( _lowercase ) -> Any:
"""simple docstring"""
return list(itertools.chain.from_iterable(_lowercase ) )
def _A ( _lowercase ) -> None:
"""simple docstring"""
__UpperCamelCase = get_git_info()
save_json(_lowercase , os.path.join(_lowercase , 'git_log.json' ) )
def _A ( _lowercase , _lowercase , _lowercase=4 , **_lowercase ) -> List[Any]:
"""simple docstring"""
with open(_lowercase , 'w' ) as f:
json.dump(_lowercase , _lowercase , indent=_lowercase , **_lowercase )
def _A ( _lowercase ) -> Union[str, Any]:
"""simple docstring"""
with open(_lowercase ) as f:
return json.load(_lowercase )
def _A ( ) -> Dict:
"""simple docstring"""
__UpperCamelCase = git.Repo(search_parent_directories=_lowercase )
__UpperCamelCase = {
'repo_id': str(_lowercase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def _A ( _lowercase , _lowercase ) -> List:
"""simple docstring"""
return list(map(_lowercase , _lowercase ) )
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
with open(_lowercase , 'wb' ) as f:
return pickle.dump(_lowercase , _lowercase )
def _A ( _lowercase ) -> List[Any]:
"""simple docstring"""
def remove_articles(_lowercase ):
return re.sub(r'\b(a|an|the)\b' , ' ' , _lowercase )
def white_space_fix(_lowercase ):
return " ".join(text.split() )
def remove_punc(_lowercase ):
__UpperCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowercase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowercase ) ) ) )
def _A ( _lowercase , _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = normalize_answer(_lowercase ).split()
__UpperCamelCase = normalize_answer(_lowercase ).split()
__UpperCamelCase = Counter(_lowercase ) & Counter(_lowercase )
__UpperCamelCase = sum(common.values() )
if num_same == 0:
return 0
__UpperCamelCase = 1.0 * num_same / len(_lowercase )
__UpperCamelCase = 1.0 * num_same / len(_lowercase )
__UpperCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
return normalize_answer(_lowercase ) == normalize_answer(_lowercase )
def _A ( _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
assert len(_lowercase ) == len(_lowercase )
__UpperCamelCase = 0
for hypo, pred in zip(_lowercase , _lowercase ):
em += exact_match_score(_lowercase , _lowercase )
if len(_lowercase ) > 0:
em /= len(_lowercase )
return {"em": em}
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
return model_prefix.startswith('rag' )
def _A ( _lowercase , _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
__UpperCamelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__UpperCamelCase = 'dropout_rate'
for p in extra_params:
if getattr(_lowercase , _lowercase , _lowercase ):
if not hasattr(_lowercase , _lowercase ) and not hasattr(_lowercase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_lowercase ) )
delattr(_lowercase , _lowercase )
continue
__UpperCamelCase = p if hasattr(_lowercase , _lowercase ) else equivalent_param[p]
setattr(_lowercase , _lowercase , getattr(_lowercase , _lowercase ) )
delattr(_lowercase , _lowercase )
return hparams, config
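# Worked example for the token-level F1 helper above (published as `f1_score`
# in the upstream RAG utilities; shown here under placeholder names): with
# prediction "the cat sat" and reference "cat sat down", normalization drops
# the article "the", giving precision 2/2, recall 2/3 and F1 = 0.8.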
| 310
| 0
|
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 1_23)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
'''a''': 0.0_8_4_9_7,
'''b''': 0.0_1_4_9_2,
'''c''': 0.0_2_2_0_2,
'''d''': 0.0_4_2_5_3,
'''e''': 0.1_1_1_6_2,
'''f''': 0.0_2_2_2_8,
'''g''': 0.0_2_0_1_5,
'''h''': 0.0_6_0_9_4,
'''i''': 0.0_7_5_4_6,
'''j''': 0.0_0_1_5_3,
'''k''': 0.0_1_2_9_2,
'''l''': 0.0_4_0_2_5,
'''m''': 0.0_2_4_0_6,
'''n''': 0.0_6_7_4_9,
'''o''': 0.0_7_5_0_7,
'''p''': 0.0_1_9_2_9,
'''q''': 0.0_0_0_9_5,
'''r''': 0.0_7_5_8_7,
'''s''': 0.0_6_3_2_7,
'''t''': 0.0_9_3_5_6,
'''u''': 0.0_2_7_5_8,
'''v''': 0.0_0_9_7_8,
'''w''': 0.0_2_5_6_0,
'''x''': 0.0_0_1_5_0,
'''y''': 0.0_1_9_9_4,
'''z''': 0.0_0_0_7_7,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ''

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
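# Usage sketch: "hello world" Caesar-shifted by 3 is "khoor zruog", and
# decrypt_caesar_with_chi_squared("khoor zruog") should return shift 3 with
# "hello world" as the decoded text, since the English letter frequencies
# give that shift the best chi-squared fit.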
| 67
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
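# The `_LazyModule` indirection above keeps importing this package cheap: the
# heavy torch/TF modules are only imported when an attribute is first touched,
# e.g. accessing `ViTMAEModel` triggers the real `modeling_vit_mae` import,
# while merely importing the package does not.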
| 310
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None # sigma(t_i)
@classmethod
def UpperCamelCase ( cls ) -> Any:
'''simple docstring'''
return cls()
@dataclass
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
class a__ ( snake_case , snake_case ):
"""simple docstring"""
@property
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
return True
@register_to_config
def __init__( self , lowercase = 0.02 , lowercase = 100 , lowercase = 1.007 , lowercase = 80 , lowercase = 0.05 , lowercase = 50 , ) -> Any:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
return KarrasVeSchedulerState.create()
def UpperCamelCase ( self , lowercase , lowercase , lowercase = () ) -> KarrasVeSchedulerState:
'''simple docstring'''
A__ = jnp.arange(0 , lowercase )[::-1].copy()
A__ = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=lowercase , schedule=jnp.array(lowercase , dtype=jnp.floataa ) , timesteps=lowercase , )
def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , ) -> Tuple[jnp.ndarray, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
A__ = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
A__ = 0
# sample eps ~ N(0, S_noise^2 * I)
A__ = random.split(lowercase , num=1 )
A__ = self.config.s_noise * random.normal(key=lowercase , shape=sample.shape )
A__ = sigma + gamma * sigma
A__ = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
'''simple docstring'''
A__ = sample_hat + sigma_hat * model_output
A__ = (sample_hat - pred_original_sample) / sigma_hat
A__ = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowercase , derivative=lowercase , state=lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
'''simple docstring'''
A__ = sample_prev + sigma_prev * model_output
A__ = (sample_prev - pred_original_sample) / sigma_prev
A__ = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowercase , derivative=lowercase , state=lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
raise NotImplementedError()
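# Sketch of the stochastic churn implemented in the noise-injection step above
# (Algorithm 2 of Karras et al., 2022): gamma inflates the noise level to
# sigma_hat = sigma + gamma * sigma, and sqrt(sigma_hat**2 - sigma**2) * eps
# of fresh noise is added so the sample is consistent with sigma_hat before
# the Euler step.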
| 68
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url + location).content, 'html.parser')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('div', attrs={'data-tn-component': 'organicJob'}):
        job_title = job.find('a', attrs={'data-tn-element': 'jobTitle'}).text.strip()
        company_name = job.find('span', {'class': 'company'}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
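# Usage sketch:
# for title, company in fetch_jobs("mumbai"):
#     print(title, "-", company)
# Note that this scrapes live HTML, so the hooks ('organicJob', 'jobTitle',
# 'company') reflect Indeed's markup at the time of writing and may break.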
| 310
| 0
|
"""simple docstring"""
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0) -> list:
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float('inf')) -> float:
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float('inf')) -> float:
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts) -> float:
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts) -> float:
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
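# Sanity check for the sample points above: the closest pair is (2, 3) and
# (3, 4), at distance sqrt(1 + 1) ~= 1.41421, which is what the recursive
# divide-and-conquer routine (O(n log n) overall) should print.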
| 69
|
def merge_sort(collection: list) -> list:
    """simple docstring"""

    def merge(left, right) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)

            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
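# Example: merge_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5]. Each level of
# the recursion halves the input and `merge` stitches the sorted halves back
# together, giving the usual O(n log n) behaviour.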
| 310
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A__ : List[Any] =logging.get_logger(__name__)
A__ : Any =torch.device('''cpu''')
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] )
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = dct.pop(lowerCAmelCase )
_lowerCAmelCase = val
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = []
for k in state_dict.keys():
_lowerCAmelCase = k
if ".pwconv" in k:
_lowerCAmelCase = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
if ".dwconv" in k:
_lowerCAmelCase = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
if ".Proj." in k:
_lowerCAmelCase = k_new.replace(""".Proj.""" , """.proj.""" )
if "patch_embed" in k_new:
_lowerCAmelCase = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
_lowerCAmelCase = k_new.split(""".""" )
if ls[2].isdigit():
_lowerCAmelCase = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
_lowerCAmelCase = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
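# Illustrative sketch (added for clarity, not part of the original script):
# create_rename_keys() maps original SwiftFormer keys onto the HF layout, e.g.
#
#     create_rename_keys({"network.0.1.pwconv1.weight": None})
#     # -> [("network.0.1.pwconv1.weight",
#     #      "swiftformer.encoder.network.0.blocks.1.point_wise_conv1.weight")]
#
# and rename_key() then pops each old key and re-inserts the tensor under the
# new name before load_state_dict() is called.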
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 70
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
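# Note (explanatory comment, not from the original test): KwargsHandler.to_kwargs()
# diffs an instance against its dataclass defaults, which is why MockClass().to_kwargs()
# is {} above and only explicitly overridden fields such as a=2 or c=2.25 show up.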
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 310
| 0
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
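# Hedged usage sketch (checkpoint name assumed from the upstream BridgeTower
# release; not part of this module):
#
#     processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#     encoding = processor(images=pil_image, text="a photo of cats", return_tensors="pt")
#
# The returned BatchEncoding holds input_ids / attention_mask from the
# tokenizer plus pixel_values (and pixel_mask) from the image processor,
# merged exactly as done in __call__ above.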
| 71
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCamelCase (_a ):
_lowercase = ["""image_processor""", """tokenizer"""]
_lowercase = """OwlViTImageProcessor"""
_lowercase = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self: int,A_: Tuple=None,A_: int=None,**A_: int ):
'''simple docstring'''
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.',A_,)
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(A_,A_ )
def __call__( self: str,A_: Dict=None,A_: Optional[int]=None,A_: Any=None,A_: Tuple="max_length",A_: int="np",**A_: Optional[Any] ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(A_,A_ ) or (isinstance(A_,A_ ) and not isinstance(text[0],A_ )):
__UpperCamelCase = [self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )]
elif isinstance(A_,A_ ) and isinstance(text[0],A_ ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(A_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(A_ ) != max_num_queries:
__UpperCamelCase = t + [' '] * (max_num_queries - len(A_ ))
__UpperCamelCase = self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )
encodings.append(A_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings],dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings],dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings],axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings],axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
A_,return_tensors=A_,**A_ ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ),tensor_type=A_ )
def snake_case_ ( self: Optional[int],*A_: int,**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process(*A_,**A_ )
def snake_case_ ( self: str,*A_: Optional[int],**A_: List[Any] ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*A_,**A_ )
def snake_case_ ( self: str,*A_: Tuple,**A_: int ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*A_,**A_ )
def snake_case_ ( self: List[str],*A_: str,**A_: List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_,**A_ )
def snake_case_ ( self: int,*A_: Any,**A_: Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*A_,**A_ )
@property
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',A_,)
return self.image_processor_class
@property
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',A_,)
return self.image_processor
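# Note on the text branch of __call__ above (explanatory, not from the module):
# when a batch carries a different number of text queries per image, every
# sample is padded with " " queries up to the batch-wide max_num_queries, so
# the per-framework concatenation (np/jax/pt/tf) always yields rectangular
# input_ids / attention_mask tensors of shape (batch * max_num_queries, seq_len).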
| 310
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # binarize the logits: background (<= 0) -> 0, label region (> 0) -> 1
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
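# Hedged usage sketch (call pattern per the transformers agents Tool API;
# the image path is hypothetical):
#
#     segmenter = ImageSegmentationTool()
#     mask = segmenter(image=Image.open("cats.png"), label="cat")
#
# Because decode() binarizes the CLIPSeg logits, the returned PIL image is a
# hard mask: 255 where the label was detected, 0 elsewhere.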
| 72
|
import math
def proth(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # number of doubling passes needed before proth_list covers the index
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
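# Cross-check sketch (added for illustration, not part of the original
# algorithm): Proth numbers are k * 2**n + 1 with odd k and 2**n > k, so a
# brute-force generator for validating small indices could look like:
#
#     def proth_bruteforce(max_n: int) -> list[int]:
#         found = set()
#         for n in range(1, max_n):
#             for k in range(1, 2 ** n, 2):  # odd k < 2**n
#                 found.add(k * 2 ** n + 1)
#         return sorted(found)
#
#     proth_bruteforce(6)[:6]  # [3, 5, 9, 13, 17, 25], matching proth(1..6)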
if __name__ == "__main__":
import doctest
doctest.testmod()
    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
| 310
| 0
|
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
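# Hedged usage sketch: splitting 12 layers across 4 devices with get_device_map()
# above yields ceil(12 / 4) = 3 consecutive blocks per device:
#
#     get_device_map(12, [0, 1, 2, 3])
#     # -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
#
# assert_device_map() is the matching validator: it rejects maps that assign a
# block twice, leave a block out, or reference blocks the model does not have.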
| 73
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
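# Shape sketch for forward() above (an interpretation added for clarity):
#     q[i]    : (query_len, hidden)      - token embeddings of the i-th query
#     s_start : (n_start_marks, hidden)  - embeddings at start-marker positions
#     s_end   : (n_end_marks, hidden)    - embeddings at end-marker positions
# matmul + sum(1) + softmax(0) scores every query token against the support
# markers, producing per-query distributions over token positions for the
# entity start and end.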
| 310
| 0
|
"""simple docstring"""
import os
def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
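# Worked micro-example of the bottom-up accumulation above (illustrative,
# not from the original file). For the triangle
#        3
#       7 4
#      2 4 6
#     8 5 9 3
# each row adds the larger of its two parents, ending with max(a[-1]) = 23
# (path 3 -> 7 -> 4 -> 9).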
if __name__ == "__main__":
print(solution())
| 74
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
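# Note on the fixture in setUp() (explanatory, not from the original test):
# vocab.json maps each toy token to an id and merges.txt lists ranked BPE
# merge rules ("l o 123" merges 'l' + 'o' -> 'lo'), so "lower" segments into
# ["low", "er</w>"] with ids [14, 15], and the unknown token falls back to
# "<unk>" (id 20) - hence the asserted [14, 15, 20].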
| 310
| 0
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, x_end: float, step_size: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
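# Hedged usage sketch (added; not part of the original doctest): solving
# y' = y with y(0) = 1 on [0, 1] via the explicit Euler step above gives
# y(1) = (1 + h)^(1/h); for h = 0.01 that is about 2.7048, approaching
# e = 2.71828... as the step size shrinks:
#
#     ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)
#     print(ys[-1])  # ~2.7048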
| 75
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`].
use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_a )
class __lowerCamelCase (_a ):
_lowercase = """rag"""
_lowercase = True
def __init__( self: Tuple,A_: Any=None,A_: Any=True,A_: List[Any]=None,A_: Optional[int]=None,A_: List[Any]=None,A_: str=None,A_: Union[str, Any]=None,A_: List[Any]=" / ",A_: Union[str, Any]=" // ",A_: List[Any]=5,A_: Optional[int]=300,A_: Tuple=768,A_: Tuple=8,A_: Optional[Any]="wiki_dpr",A_: int="train",A_: Union[str, Any]="compressed",A_: Optional[int]=None,A_: List[Any]=None,A_: List[str]=False,A_: List[str]=False,A_: str=0.0,A_: List[Any]=True,A_: Tuple=False,A_: int=False,A_: Dict=False,A_: Tuple=True,A_: int=None,**A_: Optional[int],):
'''simple docstring'''
super().__init__(
bos_token_id=A_,pad_token_id=A_,eos_token_id=A_,decoder_start_token_id=A_,forced_eos_token_id=A_,is_encoder_decoder=A_,prefix=A_,vocab_size=A_,**A_,)
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__UpperCamelCase = kwargs.pop('question_encoder' )
__UpperCamelCase = question_encoder_config.pop('model_type' )
__UpperCamelCase = kwargs.pop('generator' )
__UpperCamelCase = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = AutoConfig.for_model(A_,**A_ )
__UpperCamelCase = reduce_loss
__UpperCamelCase = label_smoothing
__UpperCamelCase = exclude_bos_score
__UpperCamelCase = do_marginalize
__UpperCamelCase = title_sep
__UpperCamelCase = doc_sep
__UpperCamelCase = n_docs
__UpperCamelCase = max_combined_length
__UpperCamelCase = dataset
__UpperCamelCase = dataset_split
__UpperCamelCase = index_name
__UpperCamelCase = retrieval_vector_size
__UpperCamelCase = retrieval_batch_size
__UpperCamelCase = passages_path
__UpperCamelCase = index_path
__UpperCamelCase = use_dummy_dataset
__UpperCamelCase = output_retrieved
__UpperCamelCase = do_deduplication
__UpperCamelCase = use_cache
if self.forced_eos_token_id is None:
__UpperCamelCase = getattr(self.generator,'forced_eos_token_id',A_ )
@classmethod
def snake_case_ ( cls: Any,A_: PretrainedConfig,A_: PretrainedConfig,**A_: int ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict(),generator=generator_config.to_dict(),**A_ )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.question_encoder.to_dict()
__UpperCamelCase = self.generator.to_dict()
__UpperCamelCase = self.__class__.model_type
return output
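# Minimal construction sketch (hedged; sub-model checkpoints assumed from the
# upstream RAG release, not defined in this file; upstream the classmethod
# above is named from_question_encoder_generator_configs):
#
#     from transformers import AutoConfig
#     rag_config = RagConfig.from_question_encoder_generator_configs(
#         AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base"),
#         AutoConfig.from_pretrained("facebook/bart-large"),
#         n_docs=5,
#     )
#
# It simply forwards both sub-configs as the required `question_encoder` and
# `generator` kwargs that __init__ asserts on.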
| 310
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 76
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # masked mean pooling over the sequence dimension
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
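# Shape note for forward() above (explanatory):
#     embs           : (batch, seq_len, hidden)  - token embeddings
#     attention_mask : (batch, seq_len)
#     embs2          : (batch, hidden)           - masked mean over real tokens
# Padding positions are zeroed before the sum and excluded from the divisor,
# so each sentence embedding averages only its non-padding tokens.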
| 310
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = ViTImageProcessor if is_vision_available() else None
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = (3, 3_2, 1_2_8)
lowercase__ : Dict = tempfile.mkdtemp()
# fmt: off
lowercase__ : Tuple = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
lowercase__ : Union[str, Any] = dict(zip(a , range(len(a ) ) ) )
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(a ) + '\n' )
lowercase__ : Dict = {
'do_normalize': False,
'do_resize': True,
'image_processor_type': 'ViTImageProcessor',
'resample': 3,
'size': {'height': 3_2, 'width': 1_2_8},
}
lowercase__ : Dict = os.path.join(self.tmpdirname , a )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(a , a )
def _UpperCAmelCase ( self , **a ) -> Union[str, Any]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **a )
def _UpperCAmelCase ( self , **a ) -> Any:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **a )
def _UpperCAmelCase ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : List[str] = np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )
lowercase__ : Union[str, Any] = Image.fromarray(np.moveaxis(a , 0 , -1 ) )
return image_input
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : Optional[Any] = self.get_image_processor()
lowercase__ : Optional[Any] = MgpstrProcessor(tokenizer=a , image_processor=a )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Dict = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : Dict = self.get_image_processor()
lowercase__ : Optional[Any] = MgpstrProcessor(tokenizer=a , image_processor=a )
processor.save_pretrained(self.tmpdirname )
lowercase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowercase__ : Optional[int] = self.get_image_processor(do_normalize=a , padding_value=1.0 )
lowercase__ : int = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Tuple = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : Tuple = MgpstrProcessor(tokenizer=a , image_processor=a )
lowercase__ : str = self.prepare_image_inputs()
lowercase__ : List[str] = image_processor(a , return_tensors='np' )
lowercase__ : str = processor(images=a , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Dict = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Union[str, Any] = MgpstrProcessor(tokenizer=a , image_processor=a )
lowercase__ : List[str] = 'test'
lowercase__ : str = processor(text=a )
lowercase__ : List[Any] = tokenizer(a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Any = self.get_image_processor()
lowercase__ : List[Any] = self.get_tokenizer()
lowercase__ : int = MgpstrProcessor(tokenizer=a , image_processor=a )
lowercase__ : Union[str, Any] = 'test'
lowercase__ : List[Any] = self.prepare_image_inputs()
lowercase__ : List[Any] = processor(text=a , images=a )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] )
# test if it raises when no input is passed
with pytest.raises(a ):
processor()
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : List[str] = MgpstrProcessor(tokenizer=a , image_processor=a )
lowercase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : List[str] = processor.char_decode(a )
lowercase__ : Union[str, Any] = tokenizer.batch_decode(a )
lowercase__ : Any = [seq.replace(' ' , '' ) for seq in decoded_tok]
self.assertListEqual(a , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : Dict = MgpstrProcessor(tokenizer=a , image_processor=a )
lowercase__ : Optional[Any] = None
lowercase__ : int = self.prepare_image_inputs()
lowercase__ : Optional[int] = processor(text=a , images=a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : str = MgpstrProcessor(tokenizer=a , image_processor=a )
lowercase__ : List[Any] = torch.randn(1 , 2_7 , 3_8 )
lowercase__ : List[str] = torch.randn(1 , 2_7 , 5_0_2_5_7 )
lowercase__ : List[str] = torch.randn(1 , 2_7 , 3_0_5_2_2 )
lowercase__ : str = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
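# Note (explanatory, not from the original test): MGP-STR decodes with three
# parallel heads - character, BPE and WordPiece vocabularies (sizes 38, 50257
# and 30522 in the tensors above) - and batch_decode() returns each head's
# predictions alongside the final generated_text and scores keys asserted on.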
| 77
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")
    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True,
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (config, input_ids, input_mask, head_mask) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
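# Note (explanatory): the batched-vs-single comparison above works because
# tokenizer.padding_side = "left" puts pad tokens before the prompt and the
# attention mask zeroes them out, so generation continues from the real last
# token of each prompt; with right padding the two outputs would diverge.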
| 310
| 0
|
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 78
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 310
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any]=3 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : List[str]=3 , __UpperCAmelCase : Optional[Any]=10 , __UpperCAmelCase : Optional[int]=[10, 20, 30, 40] , __UpperCAmelCase : Dict=[1, 1, 2, 1] , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Optional[Any]="relu" , __UpperCAmelCase : Tuple=3 , __UpperCAmelCase : Optional[int]=None , ):
'''simple docstring'''
_A = parent
_A = batch_size
_A = image_size
_A = num_channels
_A = embeddings_size
_A = hidden_sizes
_A = depths
_A = is_training
_A = use_labels
_A = hidden_act
_A = num_labels
_A = scope
_A = len(__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] ):
'''simple docstring'''
_A = FlaxRegNetModel(config=__UpperCAmelCase )
_A = model(__UpperCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : str ):
'''simple docstring'''
_A = self.num_labels
_A = FlaxRegNetForImageClassification(config=__UpperCAmelCase )
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = self.prepare_config_and_inputs()
_A , _A = config_and_inputs
_A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class _UpperCAmelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
snake_case = False
snake_case = False
snake_case = False
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = FlaxRegNetModelTester(self )
_A = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
return
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__UpperCAmelCase )
_A = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # The stem output is returned in addition to the per-stage outputs.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also works when set via the config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                # Tracing must not change the number or shapes of the outputs.
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, n * (2n - 1) for n = 0..length-1."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
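# For example, hexagonal_numbers(length=5) yields [0, 1, 6, 15, 28].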
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
'''simple docstring'''
import argparse
import torch
from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
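# Each convert_* helper below loads a pretrained UniSpeechSat model and copies the
# task-specific head weights from an S3PRL "Downstream" state dict onto it.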
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
a__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a__ : Union[str, Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
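# Example invocation (script name, model name, and paths are illustrative only):
# python convert_unispeech_sat_checkpoint.py \
#     --base_model_name microsoft/unispeech-sat-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_downstream.ckpt \
#     --model_dump_path ./converted_model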
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
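# MGP-STR uses a small character-level vocabulary (digits, lowercase letters, and the
# special [GO]/[s] tokens), so these tests exercise single-sequence behaviour only.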
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text
    @unittest.skip('MGP-STR always lower cases letters.')
    def test_added_tokens_do_lower_case(self):
        pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                special_token = '[SPECIAL_TOKEN]'
                tokenizer.add_special_tokens({'cls_token': special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(' ', ''), output_text)
    @unittest.skip('MGP-STR tokenizer only handles one sequence.')
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
    def test_pretokenized_inputs(self):
        pass