| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) stochastic differential equation scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # No input scaling is needed for this scheduler.
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(
            torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps)
        )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
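The scheduler above is meant to drive a predictor-corrector sampling loop. Below is a minimal sketch of that loop, assuming the class is diffusers' ScoreSdeVeScheduler paired with an unconditional NCSN++ score model; the checkpoint name, subfolder layout, and step count are illustrative.

import torch
from diffusers import ScoreSdeVeScheduler, UNet2DModel

# Illustrative checkpoint; any score model trained for the VE SDE should work here.
unet = UNet2DModel.from_pretrained("google/ncsnpp-church-256", subfolder="unet")
scheduler = ScoreSdeVeScheduler()

num_steps = 100  # illustrative; the paper uses 2000
scheduler.set_timesteps(num_steps)
scheduler.set_sigmas(num_steps)

# Start from pure noise scaled by the largest sigma.
sample = torch.randn(1, 3, 256, 256) * scheduler.init_noise_sigma

for i, t in enumerate(scheduler.timesteps):
    sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0])

    # corrector: Langevin MCMC steps at the current noise level
    for _ in range(scheduler.config.correct_steps):
        score = unet(sample, sigma_t).sample
        sample = scheduler.step_correct(score, sample).prev_sample

    # predictor: one reverse-SDE step toward the data distribution
    score = unet(sample, sigma_t).sample
    output = scheduler.step_pred(score, t, sample)
    sample, sample_mean = output.prev_sample, output.prev_sample_mean

image = sample_mean.clamp(0, 1)  # the final mean is taken as the sample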
---
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _snake_case ( _snake_case : int ):
for param in module.parameters():
lowerCAmelCase : Optional[int] = False
def _snake_case ( ):
lowerCAmelCase : List[str] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowerCAmelCase : Any = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _snake_case ( _snake_case : Dict ):
lowerCAmelCase : Optional[int] = plt.imshow(_snake_case )
fig.axes.get_xaxis().set_visible(_snake_case )
fig.axes.get_yaxis().set_visible(_snake_case )
plt.show()
def _snake_case ( ):
lowerCAmelCase : List[str] = datetime.now()
lowerCAmelCase : Union[str, Any] = current_time.strftime('''%H:%M:%S''' )
return timestamp
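A quick sketch of how these helpers compose, using the descriptive names adopted above and a toy module; freezing a submodule like this is the usual trick when fine-tuning only part of a network.

import torch.nn as nn

device = get_device()
backbone = nn.Linear(16, 4).to(device)
freeze_module(backbone)
assert all(not p.requires_grad for p in backbone.parameters())
print(f"[{get_timestamp()}] backbone frozen on {device}")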
---
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
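The derived properties above are simple arithmetic on the config. A short check, assuming this is transformers' EncodecConfig imported through the public API, makes the relationships concrete:

from transformers import EncodecConfig

config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
# hop length is the product of the upsampling ratios: 8 * 5 * 4 * 2 = 320
print(config.frame_rate)      # ceil(24000 / 320) = 75 frames per second
print(config.chunk_length)    # int(1.0 * 24000) = 24000 samples
print(config.chunk_stride)    # max(1, int((1 - 0.01) * 24000)) = 23760 samples
print(config.num_quantizers)  # int(1000 * 24.0 // (75 * 10)) = 32 codebooks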
---
import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging

FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak the fairseq model's weights into our BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
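For reference, an illustrative way to drive the converter above; the script filename and output directory are hypothetical, while the positional and optional flags come straight from the argparse definition.

# Convert the fairseq CNN/DailyMail checkpoint and verify it against the HF config:
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#       bart.large.cnn ./bart-large-cnn --hf_config facebook/bart-large-cnn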
---
import argparse

import torch
from torch import nn

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
---
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
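A usage sketch: "crybd cdbsxq" is "short string" Caesar-shifted by 10, so the function should recover shift 10 (the exact chi-squared value depends on the frequency table above).

shift, chi_squared, plaintext = decrypt_caesar_with_chi_squared("crybd cdbsxq")
print(shift, plaintext)  # expected: 10 short string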
---
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    # Merge two implicants that differ in at most one position; "_" marks the
    # differing bit. Returns False when they differ in more than one position.
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    # Repeatedly merge adjacent implicants; terms that never merge ("$") are
    # the prime implicants.
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    # True when the two strings differ in exactly `count` positions.
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # A column covered by exactly one implicant marks that implicant essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Take the essential implicants and zero out the columns they cover.
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily pick the implicant covering the most remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
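A non-interactive driver, mirroring main() above. Note that this implementation represents minterms as floats, so each "bit" of the intermediate strings is a three-character "1.0"/"0.0" chunk; the merge logic still works because two such chunks differ in exactly one character.

minterms = [0.0, 1.0, 2.0, 5.0, 6.0, 7.0]  # minterms of a 3-variable function
binary = decimal_to_binary(3, minterms)
prime_implicants = check(binary)
chart = prime_implicant_chart(prime_implicants, binary)
essential = selection(chart, prime_implicants)
print(essential)  # the essential prime implicants covering all minterms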
---
"""simple docstring"""
_a = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
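The re-exported classes are the building blocks of a datasets schema. A small sketch of the public API, going through the top-level datasets package rather than this internal module:

from datasets import ClassLabel, Features, Sequence, Value

features = Features(
    {
        "text": Value("string"),
        "tokens": Sequence(Value("string")),
        "label": ClassLabel(names=["negative", "positive"]),
    }
)
print(features["label"].int2str(1))  # "positive"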
---
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
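The expected IDs in these tests follow directly from ByT5's scheme: each UTF-8 byte maps to byte value + 3 (offsetting the pad/eos/unk specials), with </s> = 1. A short, dependency-free sketch reproducing the "Unicode €." case from test_multibytes_char:

text = "Unicode €."
ids = [b + 3 for b in text.encode("utf-8")] + [1]  # +3 byte offset, then eos
print(ids)  # [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]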
---
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase_ : Dict = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class SCREAMING_SNAKE_CASE__ :
snake_case__ : Optional[str] = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
snake_case__ : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
snake_case__ : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''The column name of the images in the files.'''} )
snake_case__ : Optional[str] = field(default=lowercase__ , metadata={'''help''': '''A folder containing the training data.'''} )
snake_case__ : Optional[str] = field(default=lowercase__ , metadata={'''help''': '''A folder containing the validation data.'''} )
snake_case__ : Optional[float] = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
snake_case__ : Optional[int] = field(
default=lowercase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
snake_case__ : Optional[int] = field(
default=lowercase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
a_ : Optional[int] = {}
if self.train_dir is not None:
a_ : List[str] = self.train_dir
if self.validation_dir is not None:
a_ : Dict = self.validation_dir
a_ : Dict = data_files if data_files else None
@dataclass
class SCREAMING_SNAKE_CASE__ :
snake_case__ : str = field(
default=lowercase__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
snake_case__ : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} )
snake_case__ : Optional[str] = field(
default=lowercase__ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
snake_case__ : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
snake_case__ : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
snake_case__ : str = field(default=lowercase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
snake_case__ : bool = field(
default=lowercase__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
snake_case__ : float = field(
default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} )
snake_case__ : bool = field(
default=lowercase__ , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} )
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : float = field(
default=1e-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} )
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
a_ : str = torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def SCREAMING_SNAKE_CASE_ ( ) -> Dict:
"""simple docstring"""
a_ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a_ , a_ , a_ : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a_ , a_ , a_ : List[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , __A , __A )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
a_ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(__A )
transformers.utils.logging.set_verbosity(__A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
a_ : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a_ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
a_ : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
a_ : int = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __A ) and data_args.train_val_split > 0.0:
a_ : Optional[Any] = ds['train'].train_test_split(data_args.train_val_split )
a_ : Optional[int] = split['train']
a_ : int = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a_ : Dict = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
a_ : Optional[Any] = ViTMAEConfig.from_pretrained(model_args.config_name , **__A )
elif model_args.model_name_or_path:
a_ : Any = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__A )
else:
a_ : Union[str, Any] = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
a_ : Union[str, Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__A )
elif model_args.model_name_or_path:
a_ : Optional[int] = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__A )
else:
a_ : Dict = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
a_ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
a_ : Tuple = ViTMAEForPreTraining(__A )
if training_args.do_train:
a_ : List[str] = ds['train'].column_names
else:
a_ : Dict = ds['validation'].column_names
if data_args.image_column_name is not None:
a_ : Any = data_args.image_column_name
elif "image" in column_names:
a_ : Any = 'image'
elif "img" in column_names:
a_ : Any = 'img'
else:
a_ : List[Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
a_ : Dict = image_processor.size['shortest_edge']
else:
a_ : Union[str, Any] = (image_processor.size['height'], image_processor.size['width'])
a_ : Dict = Compose(
[
Lambda(lambda __A : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(__A , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__A : Dict ):
a_ : str = [transforms(__A ) for image in examples[image_column_name]]
return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate: the configured base rate is defined per 256
    # examples, so it is scaled linearly with the effective global batch size.
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
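

# Hedged sketch (added for illustration, not part of the original script): the
# MAE objective can be exercised standalone with a randomly initialised model,
# using only the public `transformers`/`torch` APIs relied on above. This
# helper is illustrative and never called by the script.
def _demo_mae_forward():
    import torch
    from transformers import ViTMAEConfig, ViTMAEForPreTraining

    config = ViTMAEConfig(mask_ratio=0.75, norm_pix_loss=True)
    model = ViTMAEForPreTraining(config)
    # one fake RGB image at the model's expected resolution
    pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
    outputs = model(pixel_values=pixel_values)
    # the loss is a reconstruction error computed only over the masked patches;
    # `mask` marks which patches were hidden (1 = masked)
    print(float(outputs.loss), outputs.mask.shape)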
| 120
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
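

# Hedged sketch (illustrative, not part of the original file): composing the
# three sub-configs into a full InstructBLIP config and serialising it back.
# `CONFIG_MAPPING["opt"]` is used only because OPT is the documented default
# text backbone above. This helper is never called by the library.
def _demo_compose_config():
    vision = InstructBlipVisionConfig()
    qformer = InstructBlipQFormerConfig()
    text = CONFIG_MAPPING["opt"]()
    full = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
    # to_dict() round-trips the nested configs and records the composite type
    assert full.to_dict()["model_type"] == "instructblip"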
| 120
| 1
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
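# Hedged usage note (illustrative; all paths below are hypothetical):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --rembert_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin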
| 6
|
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # masked mean of per-token losses, then exponentiated
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 102
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
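

# Hedged sketch (illustrative, not part of the original module): instantiating
# the template with non-default column names and reading back the mapping used
# to rename dataset columns. Never called by the library.
def _demo_task_template():
    template = QuestionAnsweringExtractive(question_column="q", context_column="ctx", answers_column="ans")
    assert template.column_mapping == {"q": "question", "ctx": "context", "ans": "answers"}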
| 145
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
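# Hedged note (illustrative; the exact layout of `utils/tf_ops/onnx.json` is an
# assumption): the file read above is expected to map opset numbers to lists of
# op names, e.g.
#   {"opsets": {"1": ["Add", "MatMul"], "12": ["Einsum"]}}
# so iterating 1..opset accumulates every op supported up to that opset.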
| 145
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
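

# Hedged note (illustrative): unlike most configs, the MLP width here is given
# as a multiplier rather than an absolute size; assuming the modeling code
# multiplies, the effective intermediate size is
# hidden_size * intermediate_multiple_size (2560 * 4 = 10240 with the defaults).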
| 61
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 20
| 0
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
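

# Hedged usage note (illustrative; the csv path is hypothetical): once
# registered, the subcommand is driven from the command line, e.g.
#   transformers-cli train --train_data data/train.csv \
#       --task text_classification --output ./trained_model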
| 3
|
'''simple docstring'''
def __get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
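

# Hedged demo (illustrative, not part of the original module): bridges of the
# first sample graph above; removing any returned edge disconnects the graph.
def _demo_bridges():
    print(compute_bridges(__get_demo_graph(0)))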
| 3
| 1
|
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # attention-mask-weighted mean pooling over the sequence dimension
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
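

# Hedged sketch (illustrative, not part of the original module): the masked
# mean-pooling used in forward(), isolated on toy tensors - padding positions
# do not contribute to the average.
def _demo_masked_mean_pool():
    embs = torch.ones(1, 3, 4)                  # (batch, seq, dim)
    attention_mask = torch.tensor([[1, 1, 0]])  # last token is padding
    pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
    print(pooled.shape)  # (1, 4)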
| 67
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
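

# Hedged usage note (illustrative; the checkpoint name is an assumption):
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(image, return_tensors="pt")
# With apply_ocr enabled, words and boxes come from the image processor, and
# the returned encoding contains input_ids, bbox, attention_mask and image.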
| 67
| 1
|
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
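

# Hedged note (illustrative, not part of the original module): sigmoid_derivative
# above expects the *activation* value rather than the pre-activation, exploiting
# s'(x) = s(x) * (1 - s(x)).
def _demo_sigmoid_derivative():
    x = numpy.array([0.0, 1.0])
    s = sigmoid(x)
    assert numpy.allclose(sigmoid_derivative(s), s * (1 - s))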
| 285
|
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
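

# Hedged usage note (illustrative): passing both `audio` and `text` yields one
# batch with encoder features and tokenised labels for training, e.g.
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   batch = processor(audio=waveform, sampling_rate=16000, text="a transcript")
#   batch["input_features"], batch["labels"]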
| 285
| 1
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 120
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
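

# Hedged demo (illustrative, not part of the original file): the rope_scaling
# validation above accepts exactly the {"type": ..., "factor": ...} shape.
def _demo_rope_scaling():
    config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)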
| 120
| 1
|
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 326
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
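# A hypothetical invocation of the conversion script above (the concrete file
# paths are placeholders, not from the source):
#
#     python convert_mluke_checkpoint.py \
#         --checkpoint_path mluke/pytorch_model.bin \
#         --metadata_path mluke/metadata.json \
#         --entity_vocab_path mluke/entity_vocab.jsonl \
#         --pytorch_dump_folder_path ./mluke-base \
#         --model_size base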
| 326
| 1
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
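# A short usage sketch for the pipeline above (the image URL and default
# checkpoint are illustrative assumptions):
#
#     from transformers import pipeline
#
#     detector = pipeline("zero-shot-object-detection")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote", "couch"],
#     )
#     # -> list of {"score": ..., "label": ..., "box": {"xmin": ..., ...}} dicts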
| 145
|
"""
Count the number of ways a row of `length` units can be filled with blocks of
minimum length three, where any two blocks are separated by at least one empty
unit (Project Euler problem 114). The empty row counts as one way.
"""


def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f'{solution() = }')
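    # Hedged sanity check (our addition, not part of the original script): the
    # worked example in Project Euler 114 is a row of seven units, which admits
    # exactly seventeen tilings.
    assert solution(7) == 17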
| 145
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
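# Usage note (illustrative, not from this file): the lazy module defers heavy
# imports until first attribute access, and the try/except guards above make
# each backend optional.
#
#     from transformers.models.mbart import MBartConfig  # loads only the
#     # configuration submodule; modeling_mbart is imported lazily, and only
#     # when torch is installed.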
| 293
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
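# A hedged instantiation sketch (model, datasets, and helper functions below
# are placeholders, not from this file):
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,  # maps logits -> answer texts
#         compute_metrics=compute_metrics,                 # e.g. SQuAD EM/F1
#     )
#     metrics = trainer.evaluate()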
| 293
| 1
|
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
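# Illustrative CLI usage (the flags are the ones registered above; the csv path
# is a placeholder):
#
#     transformers-cli train --train_data ./data/train.csv --output ./my-model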
| 3
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
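# A hedged usage sketch for the image processor above (the file name is a
# placeholder):
#
#     from PIL import Image
#
#     processor = ConvNextImageProcessor()
#     pixel_values = processor(images=Image.open("cat.png"), return_tensors="pt")["pixel_values"]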
| 3
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
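# Minimal usage sketch (our addition): instantiating the default configuration
# and overriding a single field.
#
#     config = FNetConfig(num_hidden_layers=6)
#     assert config.model_type == "fnet"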
| 361
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
| 112
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")

    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")

    if "expand_1x1" in name:
        name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
    if "conv_3x3" in name:
        name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
    if "reduce_1x1" in name:
        name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
snake_case_ = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
snake_case_ = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
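# Illustrative invocation of the converter (paths are placeholders, not from
# the source):
#
#     python convert_mobilevit_checkpoint.py \
#         --mobilevit_name mobilevit_s \
#         --checkpoint_path ./mobilevit_s.pt \
#         --pytorch_dump_folder_path ./mobilevit-small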
| 285
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
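# Illustrative invocation (flag names are inferred from the attribute accesses
# on `args` above; the repo names are placeholders):
#
#     python pretokenizing.py \
#         --tokenizer_dir codeparrot/codeparrot \
#         --dataset_name codeparrot/codeparrot-clean-train \
#         --tokenized_data_repo my-user/tokenized-codeparrot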
| 285
| 1
|
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph with `vertices_number` nodes, adding each
    possible edge independently with the given probability."""
    graph = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i, too
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """
    Generate a complete graph with vertices_number vertices.

    >>> complete_graph(3)
    {0: [1, 2], 1: [0, 2], 2: [0, 1]}
    """
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 218
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """Extracts log-mel spectrogram features for Whisper-style speech models."""

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        # Compute the power spectrogram, map it onto the mel filter bank, and
        # rescale the log magnitudes to roughly [-1, 1].
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
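# Usage sketch (illustrative, not part of the original module): 30 s of 16 kHz
# mono audio maps to an 80 x 3000 log-mel feature grid (480000 samples / 160 hop).
#
#   import numpy as np
#   extractor = WhisperFeatureExtractor()
#   audio = np.random.randn(16000 * 30).astype(np.float32)
#   features = extractor(audio, sampling_rate=16000, return_tensors="np")
#   print(features["input_features"].shape)  # (1, 80, 3000)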
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle a list in place with the Fisher-Yates algorithm.

    Walking backwards, each element is swapped with a uniformly random element
    at or before its position, which yields an unbiased permutation.
    """
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
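# Usage sketch (illustrative, not part of the original module): roughly how a
# caller such as `Trainer.hyperparameter_search` would resolve a backend before
# launching a sweep.
#
#   backend_name = default_hp_search_backend()            # e.g. "optuna" if installed
#   backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
#   backend.ensure_available()
#   # best_run = backend.run(trainer, n_trials=20, direction="minimize")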
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Dict = CodeGenTokenizer
SCREAMING_SNAKE_CASE_ : Any = CodeGenTokenizerFast
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : List[Any] = {"""add_prefix_space""": True}
SCREAMING_SNAKE_CASE_ : List[str] = False
def lowercase_ ( self : Dict)-> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase: int = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
__lowerCAmelCase: List[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__))))
__lowerCAmelCase: List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__lowerCAmelCase: Optional[int] = {"unk_token": "<unk>"}
__lowerCAmelCase: List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
__lowerCAmelCase: Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(UpperCamelCase__) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(UpperCamelCase__))
def lowercase_ ( self : List[str] , **UpperCamelCase__ : List[str])-> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__)
def lowercase_ ( self : Dict , **UpperCamelCase__ : List[Any])-> List[str]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__)
def lowercase_ ( self : str , UpperCamelCase__ : Tuple)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Any = "lower newer"
__lowerCAmelCase: Dict = "lower newer"
return input_text, output_text
def lowercase_ ( self : Tuple)-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
__lowerCAmelCase: Union[str, Any] = "lower newer"
__lowerCAmelCase: Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
__lowerCAmelCase: Tuple = tokenizer.tokenize(UpperCamelCase__ , add_prefix_space=UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: Tuple = tokens + [tokenizer.unk_token]
__lowerCAmelCase: List[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__) , UpperCamelCase__)
def lowercase_ ( self : Tuple)-> Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__lowerCAmelCase: int = self.get_tokenizer()
__lowerCAmelCase: str = self.get_rust_tokenizer(add_prefix_space=UpperCamelCase__)
__lowerCAmelCase: List[Any] = "lower newer"
# Testing tokenization
__lowerCAmelCase: Optional[Any] = tokenizer.tokenize(UpperCamelCase__ , add_prefix_space=UpperCamelCase__)
__lowerCAmelCase: Dict = rust_tokenizer.tokenize(UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
# Testing conversion to ids without special tokens
__lowerCAmelCase: Optional[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__)
__lowerCAmelCase: str = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
# Testing conversion to ids with special tokens
__lowerCAmelCase: Dict = self.get_rust_tokenizer(add_prefix_space=UpperCamelCase__)
__lowerCAmelCase: List[Any] = tokenizer.encode(UpperCamelCase__ , add_prefix_space=UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = rust_tokenizer.encode(UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
# Testing the unknown token
__lowerCAmelCase: Any = tokens + [rust_tokenizer.unk_token]
__lowerCAmelCase: List[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(UpperCamelCase__) , UpperCamelCase__)
def lowercase_ ( self : Optional[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Union[str, Any])-> str:
'''simple docstring'''
pass
def lowercase_ ( self : Any , UpperCamelCase__ : int=1_5)-> int:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
__lowerCAmelCase: List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
# Simple input
__lowerCAmelCase: Optional[int] = "This is a simple input"
__lowerCAmelCase: Dict = ["This is a simple input 1", "This is a simple input 2"]
__lowerCAmelCase: Tuple = ("This is a simple input", "This is a pair")
__lowerCAmelCase: List[Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Simple input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Simple input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" , )
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Pair input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" , )
def lowercase_ ( self : Optional[int])-> str:
'''simple docstring'''
__lowerCAmelCase: str = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>")
# Simple input
__lowerCAmelCase: Dict = "This is a simple input"
__lowerCAmelCase: List[str] = ["This is a simple input looooooooong", "This is a simple input"]
__lowerCAmelCase: str = ("This is a simple input", "This is a pair")
__lowerCAmelCase: Tuple = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
__lowerCAmelCase: Union[str, Any] = tokenizer.pad_token_id
__lowerCAmelCase: List[Any] = tokenizer(UpperCamelCase__ , padding="max_length" , max_length=3_0 , return_tensors="np")
__lowerCAmelCase: List[Any] = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncate=UpperCamelCase__ , return_tensors="np")
__lowerCAmelCase: Dict = tokenizer(*UpperCamelCase__ , padding="max_length" , max_length=6_0 , return_tensors="np")
__lowerCAmelCase: Dict = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncate=UpperCamelCase__ , return_tensors="np")
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 3_0)
self.assertTrue(pad_token_id in out_s["input_ids"])
self.assertTrue(0 in out_s["attention_mask"])
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 3_3)
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0])
self.assertFalse(0 in out_sa["attention_mask"][0])
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1])
self.assertTrue(0 in out_sa["attention_mask"][1])
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 6_0)
self.assertTrue(pad_token_id in out_p["input_ids"])
self.assertTrue(0 in out_p["attention_mask"])
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 5_2)
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0])
self.assertFalse(0 in out_pa["attention_mask"][0])
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1])
self.assertTrue(0 in out_pa["attention_mask"][1])
def lowercase_ ( self : Any)-> str:
'''simple docstring'''
__lowerCAmelCase: Any = "$$$"
__lowerCAmelCase: Dict = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=UpperCamelCase__ , add_bos_token=UpperCamelCase__)
__lowerCAmelCase: int = "This is a simple input"
__lowerCAmelCase: int = ["This is a simple input 1", "This is a simple input 2"]
__lowerCAmelCase: int = tokenizer.bos_token_id
__lowerCAmelCase: Optional[int] = tokenizer(UpperCamelCase__)
__lowerCAmelCase: List[str] = tokenizer(UpperCamelCase__)
self.assertEqual(out_s.input_ids[0] , UpperCamelCase__)
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
__lowerCAmelCase: Tuple = tokenizer.decode(out_s.input_ids)
__lowerCAmelCase: Optional[int] = tokenizer.batch_decode(out_sa.input_ids)
self.assertEqual(decode_s.split()[0] , UpperCamelCase__)
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
@slow
def lowercase_ ( self : Any)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
__lowerCAmelCase: List[str] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
__lowerCAmelCase: int = "\nif len_a > len_b: result = a\nelse: result = b"
__lowerCAmelCase: List[Any] = tokenizer.encode(UpperCamelCase__)
__lowerCAmelCase: List[Any] = ["^#", re.escape("<|endoftext|>"), "^'''", "^\"\"\"", "\n\n\n"]
__lowerCAmelCase: Tuple = tokenizer.decode(UpperCamelCase__ , truncate_before_pattern=UpperCamelCase__)
self.assertEqual(UpperCamelCase__ , UpperCamelCase__)
def lowercase_ ( self : Dict)-> str:
'''simple docstring'''
pass
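# Usage sketch (illustrative, not from the original file): `truncate_before_pattern`
# is what the slow test above exercises; it trims a decoded completion at the first
# regex match, so generation stops at natural code boundaries such as a fresh
# comment or a run of blank lines.
#
#   tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tokenizer.encode("def add(a, b):\n    return a + b\n\n\n\n# next")
#   print(tokenizer.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))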
"""simple docstring"""
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
return int((input_a, input_a).count(0 ) == 0 )
def a__ ( ) -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
lowerCAmelCase__ :Dict = 'A painting of a squirrel eating a burger'
lowerCAmelCase__ :List[Any] = jax.device_count()
lowerCAmelCase__ :Dict = num_samples * [prompt]
lowerCAmelCase__ :Tuple = sd_pipe.prepare_inputs(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = replicate(__UpperCAmelCase )
lowerCAmelCase__ :Dict = shard(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = jax.random.PRNGKey(0 )
lowerCAmelCase__ :Optional[Any] = jax.random.split(__UpperCAmelCase , jax.device_count() )
lowerCAmelCase__ :List[Any] = sd_pipe(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_inference_steps=2_5 , jit=__UpperCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
lowerCAmelCase__ :str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase__ :Any = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowerCAmelCase__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase__ :Any = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = 'stabilityai/stable-diffusion-2'
lowerCAmelCase__ , lowerCAmelCase__ :int = FlaxDPMSolverMultistepScheduler.from_pretrained(__UpperCAmelCase , subfolder='scheduler' )
lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
__UpperCAmelCase , scheduler=__UpperCAmelCase , revision='bf16' , dtype=jnp.bfloataa , )
lowerCAmelCase__ :Union[str, Any] = scheduler_params
lowerCAmelCase__ :Dict = 'A painting of a squirrel eating a burger'
lowerCAmelCase__ :Optional[Any] = jax.device_count()
lowerCAmelCase__ :Dict = num_samples * [prompt]
lowerCAmelCase__ :Dict = sd_pipe.prepare_inputs(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = replicate(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = shard(__UpperCAmelCase )
lowerCAmelCase__ :Dict = jax.random.PRNGKey(0 )
lowerCAmelCase__ :Optional[int] = jax.random.split(__UpperCAmelCase , jax.device_count() )
lowerCAmelCase__ :str = sd_pipe(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_inference_steps=2_5 , jit=__UpperCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
lowerCAmelCase__ :Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase__ :Dict = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowerCAmelCase__ :List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase__ :Optional[int] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
"""simple docstring"""
lowerCAmelCase__ :str = BertConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(F"Building PyTorch model from configuration: {config}" )
lowerCAmelCase__ :int = BertForPreTraining(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_bert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
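# Usage sketch (illustrative, not part of the original module): the `attribute_map`
# above lets generic code read `hidden_size`/`num_attention_heads` even though DETR
# stores them as `d_model` and `encoder_attention_heads`.
#
#   config = DetrConfig(num_queries=50, d_model=256)
#   assert config.hidden_size == 256
#   onnx_config = DetrOnnxConfig(config)
#   print(list(onnx_config.inputs))  # ['pixel_values', 'pixel_mask']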
import numpy as np
import datasets
_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points; it is effectively a multivariate equivalent
of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # fall back to the pseudo-inverse when the covariance is singular
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
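# Worked example (mirrors the docstring above): with reference distribution
# [[0, 1], [1, 0]] the overall mean is 0.5, so the point [0, 1] deviates by
# [-0.5, 0.5]; multiplying through the (pseudo-)inverse covariance gives a
# squared Mahalanobis distance of 0.5.
#
#   metric = datasets.load_metric("mahalanobis")
#   print(metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]))
#   # {'mahalanobis': array([0.5])}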
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.json'''}
_lowercase = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
_lowercase = {'''mgp-str''': 27}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = VOCAB_FILES_NAMES
_lowerCamelCase: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[Any] ,A_ : str ,A_ : List[Any]="[GO]" ,A_ : Any="[GO]" ,A_ : Union[str, Any]="[s]" ,A_ : Dict="[GO]" ,**A_ : Any ) -> str:
super().__init__(
unk_token=A_ ,bos_token=A_ ,eos_token=A_ ,pad_token=A_ ,**A_ ,)
with open(A_ ,encoding='utf-8' ) as vocab_handle:
A = json.load(A_ )
A = {v: k for k, v in self.vocab.items()}
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
return len(self.vocab )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return dict(self.vocab ,**self.added_tokens_encoder )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : List[Any] ) -> Union[str, Any]:
A = []
for s in text:
char_tokens.extend(A_ )
return char_tokens
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ) -> List[Any]:
return self.vocab.get(A_ ,self.vocab.get(self.unk_token ) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[int] ) -> str:
return self.decoder.get(A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error('Vocabulary path ({}) should be a directory'.format(A_ ) )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
with open(A_ ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.vocab ,indent=2 ,sort_keys=A_ ,ensure_ascii=A_ ) + '\n' )
return (vocab_file,)
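# Usage sketch (illustrative, not from the original file): since the tokenizer is
# purely character level, encoding is a per-character lookup in `vocab.json` and
# decoding reverses it.
#
#   tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   ids = tokenizer("ticket")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))  # ['t', 'i', 'c', 'k', 'e', 't']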
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)


# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa


logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dump one decoded batch to json for inspection."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser


class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)


def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
__SCREAMING_SNAKE_CASE ="docs/source/en/_toctree.yml"
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ):
lowercase_ : Optional[int] = defaultdict(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = []
lowercase_ : Tuple = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(__SCREAMING_SNAKE_CASE )
lowercase_ : str = new_doc_list
lowercase_ : List[Any] = [key for key, value in counts.items() if value > 1]
lowercase_ : Tuple = []
for duplicate_key in duplicates:
lowercase_ : str = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(__SCREAMING_SNAKE_CASE ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
lowercase_ : Optional[Any] = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(__SCREAMING_SNAKE_CASE ) > 1:
raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(__SCREAMING_SNAKE_CASE )
# Sort
return overview_doc
def lowercase__( __SCREAMING_SNAKE_CASE : Dict=False ):
with open(__SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f:
lowercase_ : Optional[int] = yaml.safe_load(f.read() )
# Get to the API doc
lowercase_ : Any = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase_ : int = content[api_idx]['sections']
# Then to the model doc
lowercase_ : str = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
lowercase_ : Union[str, Any] = api_doc[scheduler_idx]['sections']
lowercase_ : str = clean_doc_toc(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = False
if new_scheduler_doc != scheduler_doc:
lowercase_ : List[Any] = True
if overwrite:
lowercase_ : Optional[Any] = new_scheduler_doc
if diff:
if overwrite:
lowercase_ : Tuple = api_doc
with open(__SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__SCREAMING_SNAKE_CASE , allow_unicode=__SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def lowercase__( __SCREAMING_SNAKE_CASE : str=False ):
with open(__SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f:
lowercase_ : str = yaml.safe_load(f.read() )
# Get to the API doc
lowercase_ : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase_ : Optional[int] = content[api_idx]['sections']
# Then to the model doc
lowercase_ : str = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
lowercase_ : List[Any] = False
lowercase_ : List[str] = api_doc[pipeline_idx]['sections']
lowercase_ : Optional[Any] = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
lowercase_ : int = pipeline_doc['section']
lowercase_ : str = clean_doc_toc(__SCREAMING_SNAKE_CASE )
if overwrite:
lowercase_ : Any = new_sub_pipeline_doc
new_pipeline_docs.append(__SCREAMING_SNAKE_CASE )
# sort overall pipeline doc
lowercase_ : Optional[Any] = clean_doc_toc(__SCREAMING_SNAKE_CASE )
if new_pipeline_docs != pipeline_docs:
lowercase_ : int = True
if overwrite:
lowercase_ : List[Any] = new_pipeline_docs
if diff:
if overwrite:
lowercase_ : Union[str, Any] = api_doc
with open(__SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(__SCREAMING_SNAKE_CASE , allow_unicode=__SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__SCREAMING_SNAKE_CASE =parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
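# Illustrative sketch (not part of the original script): a hypothetical toctree
# fragment run through `clean_doc_toc` to show the dedup + sort behaviour. The
# "local"/"title" values below are made up for demonstration only.
def _demo_clean_doc_toc():
    toy_doc_list = [
        {"local": "api/schedulers/ddpm", "title": "DDPM"},
        {"local": "api/schedulers/overview", "title": "Overview"},
        {"local": "api/schedulers/ddim", "title": "DDIM"},
        {"local": "api/schedulers/ddim", "title": "DDIM"},  # duplicate key, same title -> collapsed to one entry
    ]
    # Expected: the "Overview" entry first, then DDIM and DDPM alphabetically.
    return clean_doc_toc(toy_doc_list)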
"""simple docstring"""
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()
    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)
    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
    printflock(f"{gpu} is broken")
    raise
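# Illustrative sketch (not part of the original script): one extra timed
# all_reduce to spot slow interconnects once the checks above pass. It reuses
# `device`, `gpu` and `printflock` from this script; `time` is stdlib.
import time

t_start = time.perf_counter()
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
torch.cuda.synchronize(device)
printflock(f"{gpu} extra all_reduce took {time.perf_counter() - t_start:.4f}s")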
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        """simple docstring"""
        super().setUp()
        vocab = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
        self.special_tokens_map = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = 'adapt act apte'
        output_text = 'adapt act apte'
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        """simple docstring"""
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt act apte'
        bpe_tokens = ['adapt', 'act', 'ap@@', 'te']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        """simple docstring"""
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
        assert tok('sam').input_ids == [1384]
        src_text = 'I am a small frog.'
        encoded = tok([src_text], padding=False, truncation=True)['input_ids']
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        """simple docstring"""
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
        src_text = 'I am a small frog .'
        src_text_dot = '.'
        encoded = tok(src_text)['input_ids']
        encoded_dot = tok(src_text_dot)['input_ids']
        assert encoded[-1] == encoded_dot[0]
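# Illustrative worked example (not part of the original tests): with the toy
# vocab above the ids line up as __start__=0, adapt=1, act=2, ap@@=3, te=4,
# __end__=5, so "adapt act apte" framed with bos/eos encodes to [0, 1, 2, 3, 4, 5],
# which is exactly what test_full_blenderbot_small_tokenizer asserts.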
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """simple docstring"""
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)


def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help='The number of minibatches to be ran before gradients are accumulated.',
    )
    parser.add_argument(
        '--local_sgd_steps', type=int, default=8, help='Number of local SGD steps or None to disable local SGD'
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
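# Illustrative sketch (not part of the original example): the LocalSGD pattern in
# isolation. Parameters are averaged across workers only every `local_sgd_steps`
# optimizer steps rather than gradients being all-reduced on every backward,
# trading synchronization frequency for less communication. Names mirror the
# script above; this is a sketch, not a drop-in replacement for it.
#
# with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
#     for batch in train_dataloader:
#         loss = model(**batch).loss
#         accelerator.backward(loss)
#         optimizer.step()
#         optimizer.zero_grad()
#         local_sgd.step()  # counts steps; triggers parameter averaging every 8th step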
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
"""simple docstring"""
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
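# Illustrative worked example (not part of the original script): ord("中") is
# 0x4E2D, inside the first CJK block above, so _is_chinese_char(0x4E2D) is True;
# ord("a") is 0x61, outside every listed range, so _is_chinese_char(0x61) is False.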
def is_chinese(word: str):
    """simple docstring"""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
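# Illustrative worked example (not part of the original script): with
# chinese_word_set = {"中国"}, add_sub_symbol(["中", "国", "人"], {"中国"})
# returns ["中", "##国", "人"]. "中国" matches at position 0, so the
# continuation token "国" gets the "##" prefix, while "人" is left unchanged
# because no longer word in the set covers it.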
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """simple docstring"""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['cws']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """simple docstring"""
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """simple docstring"""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
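# Illustrative worked examples (not part of the original module), assuming
# prime_factors returns factors with multiplicity: mobius(2) == -1 (one prime
# factor), mobius(6) == 1 (two distinct prime factors, square free), and
# mobius(12) == 0 because 12 = 2 * 2 * 3 is not square free.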
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    '''simple docstring'''
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    """simple docstring"""

    def setUp(self):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        """simple docstring"""
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        """simple docstring"""
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        """simple docstring"""
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
        """simple docstring"""
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
        """simple docstring"""
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
"""simple docstring"""
def equation(x: float) -> float:
    """simple docstring"""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """simple docstring"""
    # A sign change between the endpoints is required for a root to be bracketed
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
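# Illustrative note (not part of the original module): both calls above bracket
# the positive root of 10 - x * x = 0, so each should print a value within 0.01
# of sqrt(10) ~= 3.162 (the exact digits depend on where the halving stops).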
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    """simple docstring"""
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    """simple docstring"""
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
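# Illustrative worked example (not part of the original script): a hypothetical
# key "text_projection.0.weight" matches text_projection_pattern with
# group(1) == "0", so projecton_layer == 0 maps to "linear1" and the key becomes
# "text_projection.linear1.weight"; a ".2." key would map to "linear2" instead.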
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """simple docstring"""
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
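# Illustrative sketch (not part of the original module): constructing the config
# with defaults and reading a mapped attribute. `attribute_map` routes
# `hidden_size` to `d_model`, so both prints below should show 256.
#
# config = DetrConfig()
# print(config.d_model)      # 256
# print(config.hidden_size)  # 256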
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates the alphanumeric fraction of the file content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1- looking for keywords in the first few lines of the file.
    2- counting the occurrences of the words 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics. Config, test and has_no_keywords files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_mobilenet_v2': [
        'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'MobileNetV2Config',
        'MobileNetV2OnnxConfig',
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_mobilenet_v2'] = ['MobileNetV2FeatureExtractor']
    _import_structure['image_processing_mobilenet_v2'] = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilenet_v2'] = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( lowerCamelCase ):
lowercase = (DDPMParallelScheduler,)
def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def A__ ( self ) -> List[str]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = self.dummy_sample_deter + 0.1
UpperCamelCase = self.dummy_sample_deter - 0.1
UpperCamelCase = samplea.shape[0]
UpperCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase = torch.arange(_SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , _SCREAMING_SNAKE_CASE )
UpperCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase = scheduler.batch_step_no_noise(_SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ):
if i == len(_SCREAMING_SNAKE_CASE ) - 1:
UpperCamelCase = -1
else:
UpperCamelCase = timesteps[i + 1]
UpperCamelCase = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE )
UpperCamelCase = prev_t.item()
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
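
# A minimal, self-contained sketch (not part of the test suite) of how the custom
# timesteps exercised above are driven from user code. It assumes diffusers'
# DDPMScheduler; the zero "residual" is a stand-in for a real denoising model.
if __name__ == "__main__":
    import torch
    from diffusers import DDPMScheduler

    scheduler = DDPMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be strictly descending
    sample = torch.randn(1, 3, 32, 32)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # placeholder for model(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample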
| 321
| 0
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
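
# A minimal usage sketch of one of these exports ("bert-base-uncased" is just an
# illustrative checkpoint name):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   collator = DataCollatorWithPadding(tokenizer=tokenizer)
#   batch = collator([tokenizer("short"), tokenizer("a longer example sentence")])
#   print(batch["input_ids"].shape)  # both rows padded to the longer length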
| 360
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
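
# A quick sketch of the config in use; `attribute_map` above means the canonical
# names (hidden_size, num_hidden_layers, ...) resolve to XLNet's native fields.
if __name__ == "__main__":
    config = XLNetConfig(d_model=768, n_layer=12, n_head=12)
    assert config.hidden_size == 768  # mapped to d_model via attribute_map
    assert config.num_hidden_layers == 12  # mapped to n_layer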
| 245
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
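
# A small interactive sketch mirroring the integration tests above (it downloads
# the checkpoint, so it is kept out of the automated run):
#
#   tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok(["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt")
#   print(batch["input_ids"][0][-2:])  # source sequences end with [eos, src_lang_code]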
| 264
|
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
lowercase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase__ : List[str] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
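
# Example invocation (the script name and paths are placeholders):
#
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-pytorch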
| 264
| 1
|
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
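
# A minimal construction sketch; with use_timm_backbone=True (the default) the
# named timm backbone is used and backbone_config stays None.
if __name__ == "__main__":
    config = ConditionalDetrConfig(num_queries=100)
    assert config.hidden_size == config.d_model  # resolved through attribute_map
    print(config.to_dict()["model_type"])  # "conditional_detr"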
| 46
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
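
# With the lazy module installed above, nothing heavy is imported until first use:
#
#   from transformers.models.bartpho import BartphoTokenizer  # resolves through _LazyModule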
| 46
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Optional[int] , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" in size:
_UpperCAmelCase : Any = get_resize_output_image_size(lowerCamelCase__ , size["shortest_edge"] , default_to_square=lowerCamelCase__ )
elif "height" in size and "width" in size:
_UpperCAmelCase : List[str] = (size["height"], size["width"])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Any , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase : List[Any] = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(lowerCamelCase__ , size=(size["height"], size["width"]) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[int, float] , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Optional[Any] , ) ->str:
'''simple docstring'''
_UpperCAmelCase : List[Any] = image.astype(np.floataa )
if offset:
_UpperCAmelCase : Optional[int] = image - (scale / 2)
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : List[Any] , ) ->np.ndarray:
'''simple docstring'''
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : ImageInput , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : float = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ) ->np.ndarray:
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
_UpperCAmelCase : Any = to_numpy_array(lowerCamelCase__ )
if do_resize:
_UpperCAmelCase : List[Any] = self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ )
if do_center_crop:
_UpperCAmelCase : Union[str, Any] = self.center_crop(lowerCamelCase__ , size=lowerCamelCase__ )
if do_rescale:
_UpperCAmelCase : Union[str, Any] = self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ , offset=lowerCamelCase__ )
if do_normalize:
_UpperCAmelCase : List[Any] = self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ )
return image
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : ImageInput , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : float = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase__ : Optional[Any] , ) ->PIL.Image.Image:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : Optional[Any] = resample if resample is not None else self.resample
_UpperCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : Optional[Any] = offset if offset is not None else self.offset
_UpperCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : str = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : Union[str, Any] = image_std if image_std is not None else self.image_std
_UpperCAmelCase : List[Any] = size if size is not None else self.size
_UpperCAmelCase : List[Any] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase : List[str] = get_size_dict(lowerCamelCase__ , param_name="crop_size" )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_UpperCAmelCase : str = make_batched(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = [
[
self._preprocess_image(
image=lowerCamelCase__ , do_resize=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , do_center_crop=lowerCamelCase__ , crop_size=lowerCamelCase__ , do_rescale=lowerCamelCase__ , rescale_factor=lowerCamelCase__ , offset=lowerCamelCase__ , do_normalize=lowerCamelCase__ , image_mean=lowerCamelCase__ , image_std=lowerCamelCase__ , data_format=lowerCamelCase__ , )
for img in video
]
for video in videos
]
_UpperCAmelCase : Tuple = {"pixel_values": videos}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
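
# Intended usage once the remaining methods above carry their canonical names
# (resize/center_crop/rescale/normalize/preprocess); a dummy 8-frame clip:
#
#   import numpy as np
#   processor = VivitImageProcessor()
#   video = [np.random.randint(0, 256, (3, 256, 256), dtype=np.uint8) for _ in range(8)]
#   out = processor(video, return_tensors="np")
#   print(out["pixel_values"].shape)  # e.g. (1, 8, 3, 224, 224)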
| 234
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by"
            " adding to this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
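
# A sketch of wiring these callbacks into a pl.Trainer (model/output_dir are
# placeholders for a real LightningModule and directory):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir, metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#       ],
#   )
#   trainer.fit(model)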
| 234
| 1
|
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
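
# Example (python-fire maps positional CLI arguments onto minify's parameters;
# the script name is a placeholder):
#
#   python minify_dataset.py /data/wmt_full /data/wmt_tiny 64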
| 356
|
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sort the left and right halves individually, then merge them in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of input_list using iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 110
| 0
|
'''simple docstring'''
from ... import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
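
# A quick construction sketch (the config mirrors BERT-style usage):
if __name__ == "__main__":
    config = NezhaConfig(vocab_size=21128, hidden_size=768)
    print(config.max_relative_position)  # 64, NEZHA's relative-position span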
| 125
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """
        The coefficients are ordered by degree, from smallest to largest.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
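
# A short demonstration of the algebra implemented above:
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # 3x^2 + 2x + 1
    q = Polynomial(1, [0, 1])  # x
    print(p + q)  # 3x^2 + 3x + 1
    print(p.derivative())  # 6x + 2
    print(p.evaluate(2))  # 1 + 2*2 + 3*4 = 17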
| 125
| 1
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps the huggingface CLIP processor so gradients can flow through the image
    preprocessing (the tokenizer output is unchanged).
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple=None , _lowercase :List[str]=None , _lowercase :int=5 , _lowercase :Optional[int]=True ):
'''simple docstring'''
lowercase__ = []
if output_path is None:
lowercase__ = """./animation.gif"""
if input_path is None:
lowercase__ = self.save_path
lowercase__ = sorted(glob(input_path + "/*" ) )
if not len(_lowercase ):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)" )
if len(_lowercase ) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" )
lowercase__ = total_duration / len(_lowercase )
lowercase__ = [frame_duration] * len(_lowercase )
if extend_frames:
lowercase__ = 1.5
lowercase__ = 3
for file_name in paths:
if file_name.endswith(".png" ):
images.append(imageio.imread(_lowercase ) )
imageio.mimsave(_lowercase , _lowercase , duration=_lowercase )
print(f'''gif saved to {output_path}''' )
def UpperCAmelCase ( self :Optional[Any] , _lowercase :List[Any]=None , _lowercase :int=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("Input either path or tensor" )
if img is not None:
raise NotImplementedError
lowercase__ = preprocess(Image.open(_lowercase ) , target_image_size=2_56 ).to(self.device )
lowercase__ = preprocess_vqgan(_lowercase )
lowercase__ = self.vqgan.encode(_lowercase )
return z
def UpperCAmelCase ( self :Tuple , _lowercase :Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.latent.detach().requires_grad_()
lowercase__ = base_latent + transform_vector
if self.quantize:
lowercase__ = self.vqgan.quantize(_lowercase )
else:
lowercase__ = trans_latent
return self.vqgan.decode(_lowercase )
def UpperCAmelCase ( self :int , _lowercase :Dict , _lowercase :Dict , _lowercase :Tuple=None ):
'''simple docstring'''
lowercase__ = self.clip_preprocessor(text=_lowercase , images=_lowercase , return_tensors="pt" , padding=_lowercase )
lowercase__ = self.clip(**_lowercase )
lowercase__ = clip_outputs.logits_per_image
if weights is not None:
lowercase__ = similarity_logits * weights
return similarity_logits.sum()
def UpperCAmelCase ( self :Dict , _lowercase :Dict , _lowercase :Dict , _lowercase :Any ):
'''simple docstring'''
lowercase__ = self._get_clip_similarity(pos_prompts["prompts"] , _lowercase , weights=(1 / pos_prompts["weights"]) )
if neg_prompts:
lowercase__ = self._get_clip_similarity(neg_prompts["prompts"] , _lowercase , weights=neg_prompts["weights"] )
else:
lowercase__ = torch.tensor([1] , device=self.device )
lowercase__ = -torch.log(_lowercase ) + torch.log(_lowercase )
return loss
def UpperCAmelCase ( self :Optional[Any] , _lowercase :str , _lowercase :Optional[int] , _lowercase :List[Any] ):
'''simple docstring'''
lowercase__ = torch.randn_like(self.latent , requires_grad=_lowercase , device=self.device )
lowercase__ = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
lowercase__ = self._add_vector(_lowercase )
lowercase__ = loop_post_process(_lowercase )
lowercase__ = self._get_CLIP_loss(_lowercase , _lowercase , _lowercase )
print("CLIP loss" , _lowercase )
if self.log:
wandb.log({"CLIP Loss": clip_loss} )
clip_loss.backward(retain_graph=_lowercase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCAmelCase ( self :Tuple , _lowercase :Any , _lowercase :str , _lowercase :int ):
'''simple docstring'''
wandb.init(reinit=_lowercase , project="face-editor" )
wandb.config.update({"Positive Prompts": positive_prompts} )
wandb.config.update({"Negative Prompts": negative_prompts} )
wandb.config.update({"lr": self.lr, "iterations": self.iterations} )
if image_path:
lowercase__ = Image.open(_lowercase )
lowercase__ = image.resize((2_56, 2_56) )
wandb.log("Original Image" , wandb.Image(_lowercase ) )
def UpperCAmelCase ( self :Tuple , _lowercase :str ):
'''simple docstring'''
if not prompts:
return []
lowercase__ = []
lowercase__ = []
if isinstance(_lowercase , _lowercase ):
lowercase__ = [prompt.strip() for prompt in prompts.split("|" )]
for prompt in prompts:
if isinstance(_lowercase , (tuple, list) ):
lowercase__ = prompt[0]
lowercase__ = float(prompt[1] )
elif ":" in prompt:
lowercase__ = prompt.split(":" )
lowercase__ = float(_lowercase )
else:
lowercase__ = prompt
lowercase__ = 1.0
processed_prompts.append(_lowercase )
weights.append(_lowercase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(_lowercase , device=self.device ),
}
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Dict , _lowercase :Tuple=None , _lowercase :int=None , _lowercase :Optional[int]=True , _lowercase :Union[str, Any]=False , _lowercase :Union[str, Any]=True , _lowercase :Union[str, Any]=True , _lowercase :Dict=None , ):
'''simple docstring'''
if image_path:
lowercase__ = self._get_latent(_lowercase )
else:
lowercase__ = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(_lowercase , _lowercase , _lowercase )
assert pos_prompts, "You must provide at least one positive prompt."
lowercase__ = self.process_prompts(_lowercase )
lowercase__ = self.process_prompts(_lowercase )
if save_final and save_path is None:
lowercase__ = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) )
if not os.path.exists(_lowercase ):
os.makedirs(_lowercase )
else:
lowercase__ = save_path + """_""" + get_timestamp()
os.makedirs(_lowercase )
lowercase__ = save_path
lowercase__ = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("Original Image" )
show_pil(custom_to_pil(_lowercase ) )
lowercase__ = loop_post_process(_lowercase )
for iter, transformed_img in enumerate(self._optimize_CLIP(_lowercase , _lowercase , _lowercase ) ):
if show_intermediate:
show_pil(_lowercase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"Image": wandb.Image(_lowercase )} )
if show_final:
show_pil(_lowercase )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
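
# A sketch of the intended entry point, assuming the remaining mangled method
# names above are restored to the project's originals (generate, process_prompts,
# ...); checkpoints and paths are placeholders:
#
#   vqgan_clip = VQGAN_CLIP()
#   vqgan_clip.generate(
#       pos_prompts="a happy face | bright eyes:2.0",
#       neg_prompts="a sad face",
#       image_path="./face.png",
#       show_final=True,
#   )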
| 356
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(_lowercase , return_tensors="np" )
lowercase__ = processor(images=_lowercase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = "test"
lowercase__ = processor(text=_lowercase )
lowercase__ = tokenizer(_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = "test"
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(_lowercase )
lowercase__ = tokenizer.batch_decode(_lowercase )
lowercase__ = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(_lowercase , _lowercase )
def UpperCAmelCase ( self :str ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = torch.randn(1 , 27 , 38 )
lowercase__ = torch.randn(1 , 27 , 5_02_57 )
lowercase__ = torch.randn(1 , 27 , 3_05_22 )
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 201
| 0
|
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head mapping a hidden state of size `embed_size` to `class_size` logits."""
    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
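# Minimal usage sketch for the head above (shapes are illustrative only):
#
#     import torch
#     head = ClassificationHead(class_size=5, embed_size=768)
#     hidden_state = torch.randn(2, 768)   # (batch, embed_size)
#     logits = head(hidden_state)          # (batch, class_size)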
| 148
|
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
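# Worked example: 6 is perfect because its proper divisors 1, 2, 3 sum to 6,
# and 28 = 1 + 2 + 4 + 7 + 14; 12 is not (1 + 2 + 3 + 4 + 6 = 16).
#
#     assert perfect(6) and perfect(28) and not perfect(12)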
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
UpperCAmelCase__ : Optional[int] = int(input("""Enter number: """).strip())
print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 245
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self) -> None:
        self.model_tester = FlaxAlbertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 324
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=4_2 ),
"""v2""": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 324
| 1
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
SCREAMING_SNAKE_CASE__ = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
SCREAMING_SNAKE_CASE__ = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
SCREAMING_SNAKE_CASE__ = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def _snake_case ( self ) -> Tuple:
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive, )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 46
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = "▁"
SCREAMING_SNAKE_CASE__ = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
SCREAMING_SNAKE_CASE__ = {
"google/pegasus-xsum": 512,
}
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}")
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.")
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                })
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq: List[int]) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
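# Usage sketch (downloads the "google/pegasus-xsum" vocab referenced above;
# network access assumed):
#
#     tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#     ids = tok("PEGASUS uses gap-sentence pretraining.").input_ids
#     print(tok.convert_ids_to_tokens(ids))  # the sequence ends with </s> (id 1)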
| 46
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
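# Note: `_import_structure` only names the exported symbols; replacing the
# module in sys.modules with a _LazyModule defers the heavy torch/TF imports
# until an attribute is first accessed, keeping `import transformers` fast.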
| 370
|
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
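    # Complexity sketch: the memo table has 2^M * (N + 1) entries (M persons,
    # N tasks) and each entry scans at most M persons, so the total work is
    # O(2^M * N * M) with O(2^M * N) memory.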
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| 82
| 0
|
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import OfflineModeIsEnabled, http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(OfflineModeIsEnabled):
http_head('https://huggingface.co' )
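# The three simulation modes above emulate, in order: a socket that never
# answers (CONNECTION_TIMES_OUT), an immediate connection failure
# (CONNECTION_FAILS), and running with HF_DATASETS_OFFLINE=1
# (HF_DATASETS_OFFLINE_SET_TO_1).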
| 69
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 110
| 0
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowercase_ = 256047
lowercase_ = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
def snake_case__ ( self : Optional[int] )-> Dict:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ = NllbTokenizer(lowercase_,keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : Tuple )-> Tuple:
'''simple docstring'''
A__ = NllbTokenizer(lowercase_,keep_accents=lowercase_ )
A__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ),[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]],)
A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
],)
A__ = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_,[
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
],)
A__ = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
],)
def snake_case__ ( self : List[Any] )-> List[Any]:
'''simple docstring'''
A__ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A__ = self.rust_tokenizer_class.from_pretrained(lowercase_,**lowercase_ )
A__ = self.tokenizer_class.from_pretrained(lowercase_,**lowercase_ )
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(lowercase_ )
A__ = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
A__ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(lowercase_,lowercase_ )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(lowercase_ )
A__ = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_,lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=True
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(lowercase_,legacy_format=lowercase_ )
A__ = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowercase_,lowercase_ )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(lowercase_ )
A__ = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_,lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=False
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(lowercase_,legacy_format=lowercase_ )
A__ = tokenizer_p.save_pretrained(lowercase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(lowercase_ )
A__ = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_,lowercase_ ) )
shutil.rmtree(lowercase_ )
@require_torch
def snake_case__ ( self : Optional[Any] )-> str:
'''simple docstring'''
if not self.test_seqaseq:
return
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
A__ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
A__ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
A__ = tokenizer.prepare_seqaseq_batch(
src_texts=lowercase_,tgt_texts=lowercase_,max_length=3,max_target_length=1_0,return_tensors='pt',src_lang='eng_Latn',tgt_lang='ron_Latn',)
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1],3 )
self.assertEqual(batch.labels.shape[1],1_0 )
# max_target_length will default to max_length if not specified
A__ = tokenizer.prepare_seqaseq_batch(
lowercase_,tgt_texts=lowercase_,max_length=3,return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1],3 )
self.assertEqual(batch.labels.shape[1],3 )
A__ = tokenizer.prepare_seqaseq_batch(
src_texts=lowercase_,max_length=3,max_target_length=1_0,return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1],3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1],3 )
self.assertNotIn('decoder_input_ids',lowercase_ )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def snake_case__ ( self : Tuple )-> Optional[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : Any )-> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A__ = [AddedToken('<special>',lstrip=lowercase_ )]
A__ = self.rust_tokenizer_class.from_pretrained(
lowercase_,additional_special_tokens=lowercase_,**lowercase_ )
A__ = tokenizer_r.encode('Hey this is a <special> token' )
A__ = tokenizer_r.encode('<special>',add_special_tokens=lowercase_ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
A__ = self.rust_tokenizer_class.from_pretrained(
lowercase_,additional_special_tokens=lowercase_,**lowercase_,)
A__ = self.tokenizer_class.from_pretrained(
lowercase_,additional_special_tokens=lowercase_,**lowercase_ )
A__ = tokenizer_p.encode('Hey this is a <special> token' )
A__ = tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(lowercase_,lowercase_ )
self.assertEqual(lowercase_,lowercase_ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    """simple docstring"""
    checkpoint_name = 'facebook/nllb-200-distilled-600M'
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [
25_60_47,
1_62_97,
13_44_08,
81_65,
24_80_66,
1_47_34,
9_50,
11_35,
10_57_21,
35_73,
83,
2_73_52,
1_08,
4_94_86,
2,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='eng_Latn', tgt_lang='ron_Latn')
        cls.pad_token_id = 1
        return cls
def snake_case__ ( self : Optional[int] )-> Any:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'],2_5_6_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'],2_5_6_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'],2_5_6_0_5_7 )
def snake_case__ ( self : Optional[Any] )-> List[str]:
'''simple docstring'''
A__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens,lowercase_ )
def snake_case__ ( self : Tuple )-> List[str]:
'''simple docstring'''
self.assertIn(lowercase_,self.tokenizer.all_special_ids )
# fmt: off
A__ = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7]
# fmt: on
A__ = self.tokenizer.decode(lowercase_,skip_special_tokens=lowercase_ )
A__ = self.tokenizer.decode(generated_ids[1:],skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_,lowercase_ )
self.assertNotIn(self.tokenizer.eos_token,lowercase_ )
def snake_case__ ( self : Tuple )-> Dict:
'''simple docstring'''
A__ = ['this is gunna be a long sentence ' * 2_0]
assert isinstance(src_text[0],lowercase_ )
A__ = 1_0
A__ = self.tokenizer(lowercase_,max_length=lowercase_,truncation=lowercase_ ).input_ids[0]
self.assertEqual(ids[-1],2 )
self.assertEqual(ids[0],lowercase_ )
self.assertEqual(len(lowercase_ ),lowercase_ )
def snake_case__ ( self : Dict )-> Optional[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ),[2_5_6_2_0_3, 3] )
def snake_case__ ( self : str )-> Optional[Any]:
'''simple docstring'''
A__ = tempfile.mkdtemp()
A__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase_ )
A__ = NllbTokenizer.from_pretrained(lowercase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids,lowercase_ )
@require_torch
def snake_case__ ( self : Tuple )-> Dict:
'''simple docstring'''
A__ = self.tokenizer(
self.src_text,text_target=self.tgt_text,padding=lowercase_,truncation=lowercase_,max_length=len(self.expected_src_tokens ),return_tensors='pt',)
A__ = shift_tokens_right(
batch['labels'],self.tokenizer.pad_token_id,self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(lowercase_,lowercase_ )
self.assertEqual((2, 1_5),batch.input_ids.shape )
self.assertEqual((2, 1_5),batch.attention_mask.shape )
A__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens,lowercase_ )
self.assertEqual(lowercase_,batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens,[EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens,[self.tokenizer.eos_token_id] )
def snake_case__ ( self : List[str] )-> Tuple:
'''simple docstring'''
A__ = self.tokenizer(self.src_text,padding=lowercase_,truncation=lowercase_,max_length=3,return_tensors='pt' )
A__ = self.tokenizer(
text_target=self.tgt_text,padding=lowercase_,truncation=lowercase_,max_length=1_0,return_tensors='pt' )
A__ = targets['input_ids']
A__ = shift_tokens_right(
lowercase_,self.tokenizer.pad_token_id,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],)
self.assertEqual(batch.input_ids.shape[1],3 )
self.assertEqual(batch.decoder_input_ids.shape[1],1_0 )
@require_torch
def snake_case__ ( self : Optional[Any] )-> Dict:
'''simple docstring'''
A__ = self.tokenizer._build_translation_inputs(
'A test',return_tensors='pt',src_lang='eng_Latn',tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(lowercase_ ),{
# A, test, EOS, en_XX
'input_ids': [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 2_5_6_0_5_7,
},)
@require_torch
def snake_case__ ( self : Tuple )-> Dict:
'''simple docstring'''
        self.tokenizer.legacy_behaviour = True
A__ = self.tokenizer(
'UN Chief says there is no military solution in Syria',src_lang='eng_Latn',tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids,[1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] )
        self.tokenizer.legacy_behaviour = False
A__ = self.tokenizer(
'UN Chief says there is no military solution in Syria',src_lang='eng_Latn',tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids,[2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
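        # The test above documents the two language-code placements: with the
        # legacy behaviour the source code (256047 = eng_Latn) is appended
        # after </s> (id 2); the current default prepends it instead.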
| 282
|
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase_ = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
lowercase_ = "zero2"
lowercase_ = "zero3"
lowercase_ = [ZEROa, ZEROa]
def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator so both params appear in the sub-test name
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args))
    return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # run_asr doesn't save results, so there is nothing to check here yet
        pass

    def run_and_check(self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, fp16: bool = True, quality_checks: bool = True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16, )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True):
        output_dir = self.get_auto_remove_tmp_dir('./xxx', after=False)
        args = f'\n            --model_name_or_path {model_name}\n            --dataset_name hf-internal-testing/librispeech_asr_dummy\n            --dataset_config_name clean\n            --train_split_name validation\n            --validation_split_name validation\n            --output_dir {output_dir}\n            --num_train_epochs {str(num_train_epochs)}\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 2\n            --evaluation_strategy steps\n            --learning_rate 5e-4\n            --warmup_steps 8\n            --orthography timit\n            --preprocessing_num_workers 1\n            --group_by_length\n            --freeze_feature_extractor\n            --report_to none\n            --save_steps 0\n            --eval_steps {eval_steps}\n            --report_to none\n        '.split()
        if fp16:
            args.extend(['--fp16'])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
        script = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
| 282
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def lowerCamelCase__ ( snake_case_ : Tuple=None ) -> Optional[int]:
__snake_case = argparse.ArgumentParser(add_help=snake_case_ , allow_abbrev=snake_case_ )
# The main config parser
__snake_case = config_command_parser(snake_case_ )
# The subparser to add commands to
__snake_case = config_parser.add_subparsers(title='''subcommands''' , dest='''subcommand''' )
# Then add other parsers with the parent parser
default_command_parser(snake_case_ , parents=[parent_parser] )
update_command_parser(snake_case_ , parents=[parent_parser] )
return config_parser
def lowerCamelCase__ ( ) -> Optional[int]:
__snake_case = get_config_parser()
__snake_case = config_parser.parse_args()
if not hasattr(snake_case_ , '''func''' ):
config_parser.print_help()
exit(1 )
# Run
args.func(snake_case_ )
if __name__ == "__main__":
main()
| 24
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    '''simple docstring'''
    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)
    def preprocess_img(self, images):
        """simple docstring"""
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images
    def __call__(self, text=None, images=None, **kwargs):
        """simple docstring"""
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        """Instantiate a VQGAN+CLIP editor; pass a custom model via `vqgan` or load one from disk."""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Stitch the intermediate .png frames saved during generate() into a gif."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        frame_durations = [frame_duration] * len(paths)
        if extend_frames:
            # hold the first and last frames longer
            frame_durations[0] = 1.5
            frame_durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=frame_durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z

    def _add_vector(self, transform_vector):
        """Add `transform_vector` to the stored base latent and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        """Loss = -log(similarity to positive prompts) + log(similarity to negative prompts)."""
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        """Parse a "prompt|prompt:weight" string (or a list of prompts) into prompts and weights."""
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Generate an image edit from the given prompts, optionally starting from an input image."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))
        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
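# A minimal usage sketch (paths and prompts are hypothetical; requires a local VQGAN config and
# checkpoint plus the helper modules imported above):
#
#   editor = VQGAN_CLIP(vqgan_config="./vqgan.yaml", vqgan_checkpoint="./vqgan.ckpt", iterations=20)
#   editor.generate(pos_prompts="a smiling face | blue eyes:0.5", image_path="./face.png",
#                   save_intermediate=True)
#   editor.make_animation(output_path="./edit.gif")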
| 201
| 0
|
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    Sort a sequence containing only the values in `colors`, in place, in one pass.

    >>> dutch_national_flag_sort([2, 1, 0])
    [0, 1, 2]
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
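# Worked example: dutch_national_flag_sort([2, 0, 1, 2, 0]) returns [0, 0, 1, 2, 2] in a single pass.
# The low/high pointers grow the 0-region from the left and the 2-region from the right while `mid`
# scans the unclassified middle, so the running time is O(n) with O(1) extra space.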
| 120
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
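# With this layout, `from transformers.models.vit_msn import ViTMSNConfig` resolves statically for
# type checkers via the TYPE_CHECKING branch, while at runtime the module is swapped for a
# _LazyModule that only imports the torch-backed submodules in _import_structure on first access.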
| 120
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 324
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        processor = Blip2Processor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 330
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    # two locks on the same file: acquiring the second while the first is held must time out
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    # overly long lock filenames are truncated so the basename stays within 255 bytes
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
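# Usage sketch of the semantics exercised above (lock path is hypothetical):
#
#   lock = FileLock("/tmp/demo.lock")
#   with lock.acquire(timeout=0.05):  # raises Timeout if another holder keeps the lock too long
#       ...  # critical section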
| 330
| 1
|
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 85
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict, vert: int, visited: list) -> list:
    """DFS from `vert`, returning vertices in post-order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict, vert: int, visited: list) -> list:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict) -> list:
    """Kosaraju's algorithm: DFS post-order on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
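if __name__ == "__main__":
    # Quick sanity check: in test_graph_1 the cycle 0 -> 2 -> 1 -> 0 forms one component,
    # while 3 and 4 are each their own component.
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]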
| 82
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


# NOTE: the original class name was not recoverable from this dump; `CustomImageProcessor`
# is a stand-in for a standard resize/center-crop/rescale/normalize image processor.
class CustomImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PIL.Image.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        rescale_factor=1 / 255,
        do_rescale=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PIL.Image.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
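# A minimal usage sketch (image path is hypothetical):
#
#   processor = CustomImageProcessor()
#   batch = processor(images=PIL.Image.open("cat.png"), return_tensors="pt")
#   # batch["pixel_values"] has shape (1, 3, 224, 224): resized to 256, then center-cropped to 224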
| 354
|
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = Mock()
lowercase = conn, Mock()
lowercase = iter([1, None] )
lowercase = lambda lowerCAmelCase__ : next(lowerCAmelCase__ )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=lowerCAmelCase__ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 32
| 0
|
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='https://unbabel.github.io/COMET/html/index.html',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'sources': datasets.Value('string', id='sequence'),
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/Unbabel/COMET'],
            reference_urls=[
                'https://github.com/Unbabel/COMET',
                'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
                'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf',
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da'))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 282
|
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the maximum-sum subarray of arr[low..high] by divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray that crosses the midpoint: scan left from mid and right from mid + 1."""
    left_sum, max_left = float('-inf'), -1
    right_sum, max_right = float('-inf'), -1
    summ: float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print('No of Inputs\t\tTime Taken')
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, '\t\t', runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel('Number of Inputs')
    plt.ylabel('Time taken in seconds')
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
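# Worked example: max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8) returns (3, 6, 6),
# i.e. the maximum-sum subarray [4, -1, 2, 1] spans indices 3..6 and sums to 6.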
| 282
| 1
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post renaming of basic JAX keys to pytorch."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name=WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + '/checkpoint', 'rb') as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())['optimizer']['target']
        checkpoint_info = flatten_dict(checkpoint_info, sep='/')
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split('/')), raw_weights)
        key = '/'.join(key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace('.bin', f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('.bin', f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace('.bin', f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace('.bin', f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), 'w', encoding='utf-8') as f:
        content = json.dumps(index, indent=2, sort_keys=True) + '\n'
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained('google/switch-base-8')
    config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted')
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '/home/arthur_huggingface_co/transformers/switch_converted', device_map='auto'
    )
    tokenizer = T5Tokenizer.from_pretrained('t5-small')
    text = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
    input_ids = tokenizer(text, return_tensors='pt').input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
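# Example invocation (a sketch; the script name is illustrative, argument names match the parser above):
#   python convert_big_switch.py \
#       --switch_t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --max_shard_size 10GB --dtype bfloat16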
| 355
|
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()
        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + '\n')
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, '')
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + '\n')
        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY='error')
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger('transformers.models.bart.tokenization_bart')
        env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )
        # restore to the original level
        os.environ['TRANSFORMERS_VERBOSITY'] = ''
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY='super-error')
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart')
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error', cl.out)
        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1'):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, '')
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + '\n')


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 113
| 0
|
'''simple docstring'''
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f'Successfully ran on {num_gpus} GPUs')


if __name__ == "__main__":
    main()
| 120
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 120
| 1
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Run accelerate's `pre_forward` hook (if one is attached) before calling `method`."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
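# Usage sketch: decorating a method makes accelerate's pre_forward hook (installed by CPU
# offloading) run first, so offloaded weights are moved to the execution device before the call:
#
#   class MyModel(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           ...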
| 286
|
import os
import pytest
from attr import dataclass
__lowerCamelCase : Any = """us-east-1""" # defaults region
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a_ = 42
a_ = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
a_ = {
"task_name": "mnli",
"per_device_train_batch_size": 1_6,
"per_device_eval_batch_size": 1_6,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 5_0_0,
"save_steps": 5_5_0_0,
}
a_ = {**hyperparameters, "max_steps": 1_0_0_0}
@property
def _lowercase ( self : List[Any] ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"
@property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"
@property
    def image_uri(self) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
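# Hypothetical usage sketch (not part of the original conftest): a test class
# sets `framework` and pulls in the fixture, after which `self.env` exposes the
# hyperparameters, role and image URI defined above.
@pytest.mark.usefixtures("sm_env")
class ExampleSageMakerTest:
    framework = "pytorch"

    def test_hyperparameters(self):
        assert self.env.hyperparameters["task_name"] == "mnli"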
| 286
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images to feed the image processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 330
|
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """
    >>> capitalize("hello world")
    'Hello world'
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
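# An equivalent one-liner for ASCII input (a sketch, not part of the original
# module): uppercase the first character directly instead of building a map;
# str.upper() is a no-op on non-letters, matching the dict lookup's fallback.
def capitalize_simple(sentence: str) -> str:
    return sentence[0].upper() + sentence[1:] if sentence else ""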
| 330
| 1
|
'''simple docstring'''
def is_even(number: int) -> bool:
    """Return True if `number` is even, by testing its lowest bit."""
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
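# A quick demonstration (a sketch, assuming the function above): the bitwise
# check agrees with the usual modulo test, including on negative integers.
assert all(is_even(n) == (n % 2 == 0) for n in range(-10, 11))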
| 365
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """
    Return the product a * b * c of the Pythagorean triplet with a + b + c == n.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c:
        # c = n - a - b, so a**2 + b**2 = (n - a - b)**2 gives b = (n**2 - 2*a*n) // (2*n - 2*a)
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
        if candidate >= product:
            product = candidate
    return product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 184
| 0
|
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)
@require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
# fmt: off
_lowerCAmelCase = {"""input_ids""": [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 70
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 32
| 0
|
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        """Fine-tune a tiny bert2bert encoder-decoder model on a 1% slice of CNN/DailyMail."""
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 354
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self) -> str:
        """Return a string of all the Nodes using in order traversal."""
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        """Insert a new node in the Binary Search Tree with value `value`."""
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        """Go deep on the right branch to find the node with the biggest value."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        """Go deep on the left branch to find the node with the smallest value."""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        """
        This function traverses the tree.
        You can pass a function to traverse the tree as needed by client code.
        """
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        """Perform an inorder traversal and append the values of the nodes to a list."""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        """Return the kth smallest element in a binary search tree."""
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    """Demonstrate insert, search, min/max and removal on a small tree."""
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
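# A quick property check (a sketch, not part of the original module): for any
# insertion order, an inorder traversal of a binary search tree must yield the
# values in sorted order.
def check_inorder_sorted(values: list[int]) -> bool:
    tree = BinarySearchTree()
    tree.insert(*values)
    arr: list[int] = []
    tree.inorder(arr, tree.root)
    return arr == sorted(values)


if __name__ == "__main__":
    assert check_inorder_sorted([8, 3, 6, 1, 10, 14, 13, 4, 7])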
| 299
| 0
|
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
@unittest.skip(reason="""Model has no tokens embeddings""" )
def _snake_case ( self ) -> str:
pass
def _snake_case ( self ) -> int:
lowerCAmelCase = inspect.signature(getattr(lowercase , """forward""" ) )
# The main input is the name of the argument after `self`
lowerCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , lowercase )
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()


def prepare_batch(filename="train-batch.pt"):
    """Download a cached batch of the tourism-monthly dataset from the Hub."""
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 46
|
"""simple docstring"""
TEXT_TO_IMAGE_PARAMS = frozenset(
[
'''prompt''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(['''prompt''', '''negative_prompt'''])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(['''image'''])
IMAGE_VARIATION_PARAMS = frozenset(
[
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(['''image'''])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
'''prompt''',
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
'''prompt''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['''image''', '''mask_image'''])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
'''example_image''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['''example_image''', '''image''', '''mask_image'''])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(['''class_labels'''])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(['''class_labels'''])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(['''batch_size'''])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(['''batch_size'''])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
'''prompt''',
'''audio_length_in_s''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(['''prompt''', '''negative_prompt'''])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(['''input_tokens'''])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(['''input_tokens'''])
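# A minimal usage sketch (hypothetical helper, not from this module): pipeline
# test mixins typically assert that a pipeline's __call__ accepts at least the
# parameter names collected in these frozensets.
import inspect


def accepts_required_params(pipeline_class, required_params: frozenset) -> bool:
    call_parameters = set(inspect.signature(pipeline_class.__call__).parameters)
    return required_params.issubset(call_parameters)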
| 113
| 0
|
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch `function` on `num_processes` workers from a notebook environment."""
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )

        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
        function(*args)
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
A_ : List[Any] = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    """Run `function` on `num_processes` CPU workers, for debugging purposes."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
| 135
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_factor=1 / 255, do_center_crop=True, crop_size=None, do_flip_channel_order=True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image, size, resample=PIL.Image.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def flip_channel_order(self , image , data_format = None ):
        return flip_channel_order(image , data_format=data_format )
    def preprocess(self , images , do_resize = None , size = None , resample = None , do_rescale = None , rescale_factor = None , do_center_crop = None , crop_size = None , do_flip_channel_order = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation(self , outputs , target_sizes = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
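# A minimal usage sketch for the processor above (a sketch, not shipped code; assumes
# numpy is installed and keeps this file's obfuscated class name):
if __name__ == "__main__":
    import numpy as np

    processor = _lowerCAmelCase()
    dummy_image = (np.random.rand(300 , 400 , 3 ) * 255).astype("""uint8""" )  # fake RGB image
    batch = processor(images=dummy_image , return_tensors="""np""" )  # __call__ dispatches to preprocess()
    print(batch["""pixel_values"""].shape )  # (1, 3, 256, 256): 256x256 center crop, channels first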
| 135
| 1
|
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file( file , class_name , test_name , correct_line , done_test ):
    """simple docstring"""
    _id = f"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1
    with open(file , 'r' ) as f:
        lines = f.readlines()
    class_regex = f"""class {class_name}("""
    test_regex = f"""{4 * " "}def {test_name}("""
    line_begin_regex = f"""{8 * " "}{correct_line.split()[0]}"""
    another_line_begin_regex = f"""{16 * " "}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"""{spaces * " "}{correct_line}""" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file , 'w' ) as f:
        for line in new_lines:
            f.write(line )
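# Each line of --correct_filename is a semicolon-separated record (format inferred from
# the split(';') in main below); a hypothetical entry:
#   tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;expected_slice = torch.tensor([...])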
def main( correct , fail=None ):
    """simple docstring"""
    if fail is not None:
        with open(fail , 'r' ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct , 'r' ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file , class_name , test_name , correct_line = line.split(';' )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
lowerCamelCase_ : Optional[Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 286
|
"""simple docstring"""
import os
def solution( ):
    """simple docstring"""
    file_path = os.path.join(os.path.dirname(__file__ ) , 'num.txt' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
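# Example (a sketch): with the Project Euler problem 13 input in num.txt (one hundred
# 50-digit numbers, one per line), solution() returns the first ten digits of their
# grand total, e.g. "5537376230" for the original puzzle input.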
if __name__ == "__main__":
print(solution())
| 286
| 1
|
def infix_2_postfix( infix ):
    """simple docstring"""
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    }  # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        '''Symbol'''.center(8 ) , '''Stack'''.center(print_width ) , '''Postfix'''.center(print_width ) , sep=''' | ''' , )
    print('''-''' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
        print(
            x.center(8 ) , (''''''.join(stack )).ljust(print_width ) , (''''''.join(post_fix )).ljust(print_width ) , sep=''' | ''' , )  # Output in tabular format
    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            ''' '''.center(8 ) , (''''''.join(stack )).ljust(print_width ) , (''''''.join(post_fix )).ljust(print_width ) , sep=''' | ''' , )  # Output in tabular format
    return "".join(post_fix )  # return Postfix as str
def infix_2_prefix( infix ):
    """simple docstring"""
    infix = list(infix[::-1] )  # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = ''')'''  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '''('''  # change ")" to "("
    return (infix_2_postfix(''''''.join(infix ) ))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
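# Example (a sketch; both converters also print their step-by-step tables as a side effect):
#   infix_2_postfix("a+b*c")  ->  "abc*+"
#   infix_2_prefix("a+b*c")   ->  "+a*bc"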
if __name__ == "__main__":
    Infix = input("""\nEnter an Infix Equation = """ )  # Input an Infix equation
    Infix = "".join(Infix.split() )  # Remove spaces from the input
print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
| 367
|
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path , config_name , flax_dump_folder_path ):
    """simple docstring"""
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config )
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    split_mlp_wi = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
A__ = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
A__ = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
A__ = f"""layers_{str(lowercase_ )}"""
# Self-Attention
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
A__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            tax_mlp_wi_1 = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
A__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
A__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
        flax_model_encoder_layer_block = flax_model.params['''encoder''']['''block'''][str(layer_index )]['''layer''']
A__ = tax_attention_key
A__ = tax_attention_out
A__ = tax_attention_query
A__ = tax_attention_value
A__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_global_layer_norm
if split_mlp_wi:
            A__ = tax_mlp_wi_0
            A__ = tax_mlp_wi_1
else:
A__ = tax_mlp_wi
A__ = tax_mlp_wo
A__ = tax_mlp_layer_norm
A__ = flax_model_encoder_layer_block
# Only for layer 0:
A__ = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
A__ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
A__ = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
A__ = tax_encoder_global_rel_embedding
# Assigning
A__ = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
A__ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
A__ = f"""layers_{str(lowercase_ )}"""
# Self-Attention
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
A__ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
A__ = tax_enc_dec_attention_module['''key''']['''kernel''']
A__ = tax_enc_dec_attention_module['''out''']['''kernel''']
A__ = tax_enc_dec_attention_module['''query''']['''kernel''']
A__ = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            tax_mlp_wi_1 = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
A__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
A__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
A__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
        flax_model_decoder_layer_block = flax_model.params['''decoder''']['''block'''][str(layer_index )]['''layer''']
A__ = tax_attention_key
A__ = tax_attention_out
A__ = tax_attention_query
A__ = tax_attention_value
A__ = tax_pre_attention_layer_norm
A__ = tax_enc_dec_attention_key
A__ = tax_enc_dec_attention_out
A__ = tax_enc_dec_attention_query
A__ = tax_enc_dec_attention_value
A__ = tax_cross_layer_norm
if split_mlp_wi:
            A__ = tax_mlp_wi_0
            A__ = tax_mlp_wi_1
else:
A__ = tax_mlp_wi
A__ = tax_mlp_wo
        A__ = tax_mlp_layer_norm
A__ = flax_model_decoder_layer_block
# Decoder Normalization
    tax_decoder_norm = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    A__ = tax_decoder_norm
# Only for layer 0:
A__ = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
A__ = tax_decoder_rel_embedding
# Token Embeddings
    tax_token_embeddings = tax_model['''target''']['''token_embedder''']['''embedding''']
    A__ = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
        tax_lm_head = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
        A__ = tax_lm_head
    flax_model.save_pretrained(flax_dump_folder_path )
    print('''T5X Model was successfully converted!''' )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
_lowerCamelCase : Tuple = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
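# Example invocation (a sketch; every path below is a placeholder):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./long-t5-local-base-flax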
| 231
| 0
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **kwargs , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("""name_or_path""" )
        if name_or_path is None:
            logger.warning(
                """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
                """ if you are testing the model, this can safely be ignored""" )
            name_or_path = """None"""
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = """<|endoftext|>""" if eos_token is None else eos_token
        unk_token = """<unk>""" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = """<pad>""" if pad_token is None else pad_token
            bos_token = """<s>""" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F'[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]' )
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model )
    def preprocess_text( self , text ):
        '''simple docstring'''
        text = self.non_printing_characters_re.sub("""""" , text )
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else """ """ for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize("""NFC""" , text )
        return text
    def _tokenize( self , text , **kwargs ):
        '''simple docstring'''
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    @staticmethod
    def clean_up_tokenization( out_string ):
        '''simple docstring'''
        return out_string
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def encode_fast( self , text , return_tensors = False ):
        '''simple docstring'''
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
    def decode_fast( self , token_ids ):
        '''simple docstring'''
        return self.sp_model.decode(token_ids )
def UpperCamelCase ( self: Any , UpperCAmelCase_: "Conversation" ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
_SCREAMING_SNAKE_CASE = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(__lowerCamelCase ) + F'{self.bos_token}Bot:'
)
return self.encode(text=__lowerCamelCase )
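# A minimal usage sketch for the tokenizer above (a sketch; "spiece.model" is a
# placeholder for a real GPT-SW3 SentencePiece model file on disk):
#   tok = GPTSw3Tokenizer(vocab_file="spiece.model")
#   ids = tok.encode_fast("Hej! Hur mår du?", return_tensors="pt")
#   print(tok.decode_fast(ids.tolist()))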
| 306
|
import datasets
from .evaluate import evaluate
_CITATION = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
_DESCRIPTION = "\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
_KWARGS_DESCRIPTION = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair as given in the references (see below)\n        - 'prediction_text': list of possible texts for the answer, as a list of strings\n        depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair (see above),\n        - 'answers': a Dict in the CUAD dataset format\n            {\n                'text': list of possible texts for the answer, as a list of strings\n                'answer_start': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    'exact_match': Exact match (the normalized answer exactly match the gold answer)\n    'f1': The F-score of predicted tokens versus the gold answer\n    'aupr': Area Under the Precision-Recall curve\n    'prec_at_80_recall': Precision at 80% recall\n    'prec_at_90_recall': Precision at 90% recall\nExamples:\n    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> cuad_metric = datasets.load_metric(\"cuad\")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _lowercase ( datasets.Metric):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute( self , predictions , references ):
        '''simple docstring'''
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 184
| 0
|
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 359
|
'''simple docstring'''
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
class TFMT5Model(TFT5Model ):
    model_type = 'mt5'
    config_class = MT5Config
class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration ):
    model_type = 'mt5'
    config_class = MT5Config
class TFMT5EncoderModel(TFT5EncoderModel ):
    model_type = 'mt5'
    config_class = MT5Config
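# A minimal usage sketch (assumes TensorFlow and the Hub checkpoint are available):
#   model = TFMT5Model.from_pretrained("google/mt5-small")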
| 114
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
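# Example: with this _LazyModule layout, importing the package itself is cheap; the heavy
# torch/TF/flax submodules listed in _import_structure are only imported on first attribute
# access, e.g. touching `BlenderbotForConditionalGeneration` triggers the torch import path.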
| 19
|
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative( img ):
    # getting number of pixels in the image
    pixel_h , pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
| 299
| 0
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler ):
    a : int = 0
    b : bool = False
    c : float = 3.0
class __magic_name__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self )-> Union[str, Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {"a": 2, "b": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} )
@require_cuda
def UpperCAmelCase_ ( self )-> int:
# If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1_024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def UpperCAmelCase_ ( self )-> Any:
        cmd = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 356
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( snake_case ):
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , )-> Optional[Any]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=_lowercase , speech_processor=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , feature_extractor=_lowercase , )
def UpperCAmelCase_ ( self , _lowercase = "auto" )-> str:
if slice_size == "auto":
UpperCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowercase )
def UpperCAmelCase_ ( self )-> Optional[int]:
self.enable_attention_slicing(_lowercase )
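    # Example (a sketch): on a memory-constrained GPU, call pipe.enable_attention_slicing()
    # before generation to trade a little speed for lower peak VRAM, and
    # pipe.disable_attention_slicing() to restore the default behaviour.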
@torch.no_grad()
def __call__( self , _lowercase , _lowercase=16_000 , _lowercase = 512 , _lowercase = 512 , _lowercase = 50 , _lowercase = 7.5 , _lowercase = None , _lowercase = 1 , _lowercase = 0.0 , _lowercase = None , _lowercase = None , _lowercase = "pil" , _lowercase = True , _lowercase = None , _lowercase = 1 , **_lowercase , )-> str:
UpperCamelCase_ = self.speech_processor.feature_extractor(
_lowercase , return_tensors="pt" , sampling_rate=_lowercase ).input_features.to(self.device )
UpperCamelCase_ = self.speech_model.generate(_lowercase , max_length=480_000 )
UpperCamelCase_ = self.speech_processor.tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase , normalize=_lowercase )[
0
]
if isinstance(_lowercase , _lowercase ):
UpperCamelCase_ = 1
elif isinstance(_lowercase , _lowercase ):
UpperCamelCase_ = len(_lowercase )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(_lowercase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(_lowercase )}." )
# get prompt text embeddings
UpperCamelCase_ = self.tokenizer(
_lowercase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
UpperCamelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = text_embeddings.shape
UpperCamelCase_ = text_embeddings.repeat(1 , _lowercase , 1 )
UpperCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , _lowercase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase_ = 42
if negative_prompt is None:
UpperCamelCase_ = [""] * batch_size
elif type(_lowercase ) is not type(_lowercase ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(_lowercase )} !="
F" {type(_lowercase )}." )
elif isinstance(_lowercase , _lowercase ):
UpperCamelCase_ = [negative_prompt]
elif batch_size != len(_lowercase ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(_lowercase )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
UpperCamelCase_ = negative_prompt
UpperCamelCase_ = text_input_ids.shape[-1]
UpperCamelCase_ = self.tokenizer(
_lowercase , padding="max_length" , max_length=_lowercase , truncation=_lowercase , return_tensors="pt" , )
UpperCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase_ = uncond_embeddings.shape[1]
UpperCamelCase_ = uncond_embeddings.repeat(1 , _lowercase , 1 )
UpperCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , _lowercase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase_ = torch.randn(_lowercase , generator=_lowercase , device="cpu" , dtype=_lowercase ).to(
self.device )
else:
UpperCamelCase_ = torch.randn(_lowercase , generator=_lowercase , device=self.device , dtype=_lowercase )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
UpperCamelCase_ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_lowercase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase_ = {}
if accepts_eta:
UpperCamelCase_ = eta
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase_ = self.scheduler.scale_model_input(_lowercase , _lowercase )
# predict the noise residual
UpperCamelCase_ = self.unet(_lowercase , _lowercase , encoder_hidden_states=_lowercase ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase_ , UpperCamelCase_ = noise_pred.chunk(2 )
UpperCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase_ = self.scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowercase , _lowercase , _lowercase )
UpperCamelCase_ = 1 / 0.18_215 * latents
UpperCamelCase_ = self.vae.decode(_lowercase ).sample
UpperCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase_ = self.numpy_to_pil(_lowercase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_lowercase , nsfw_content_detected=_lowercase )
| 60
| 0
|
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__A = logging.getLogger(__name__)
class NERTransformer(BaseTransformer ):
    mode = "token-classification"
    def __init__( self , hparams ):
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        module = import_module("tasks" )
        try:
            token_classification_task_clazz = getattr(module , hparams.task_type )
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                F"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
                F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
        self.labels = self.token_classification_task.get_labels(hparams.labels )
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams , len(self.labels ) , self.mode )
    def forward( self , **inputs ):
        return self.model(**inputs )
    def training_step( self , batch , batch_num ):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs )
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data( self ):
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s" , cached_features_file )
                features = torch.load(cached_features_file )
            else:
                logger.info("Creating features from dataset file at %s" , args.data_dir )
                examples = self.token_classification_task.read_examples_from_file(args.data_dir , mode )
                features = self.token_classification_task.convert_examples_to_features(
                    examples , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info("Saving features into cached file %s" , cached_features_file )
                torch.save(features , cached_features_file )
    def get_dataloader( self , mode , batch_size , shuffle = False ):
        cached_features_file = self._feature_file(mode )
        logger.info("Loading features from cached file %s" , cached_features_file )
        features = torch.load(cached_features_file )
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            all_token_type_ids = torch.tensor([0 for f in features] , dtype=torch.long )
        # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_label_ids ) , batch_size=batch_size )
    def validation_step( self , batch , batch_nb ):
        """Compute validation""" ""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs )
        tmp_eval_loss , logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self , outputs ):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs] ).mean()
        preds = np.concatenate([x["pred"] for x in outputs] , axis=0 )
        preds = np.argmax(preds , axis=2 )
        out_label_ids = np.concatenate([x["target"] for x in outputs] , axis=0 )
        label_map = dict(enumerate(self.labels ) )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
        ret = dict(results.items() )
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self , outputs ):
        # when stable
        ret , preds , targets = self._eval_end(outputs )
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self , outputs ):
        # updating to test_epoch_end instead of deprecated test_end
        ret , predictions , targets = self._eval_end(outputs )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args( parser , root_dir ):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            "--task_type" , default="NER" , type=str , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
        parser.add_argument(
            "--max_seq_length" , default=128 , type=int , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--labels" , default="" , type=str , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
        parser.add_argument(
            "--gpus" , default=0 , type=int , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
        parser.add_argument(
            "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
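# Example invocation (a sketch; the script name, paths, and generic flags below are
# placeholders wired up by add_generic_args / lightning_base):
#   python run_ner_pl.py --data_dir ./conll2003 --model_name_or_path bert-base-cased \
#       --output_dir ./ner-out --task_type NER --max_seq_length 128 --do_train --do_predict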
| 135
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
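# Run just this file's tests with (a sketch; the test path is a placeholder):
#   python -m pytest tests/models/ctrl/test_tokenization_ctrl.py -q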
| 135
| 1
|
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = 'facebook/wmt19-en-de'
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
_snake_case = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
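#
# A minimal sketch of loading the result back (hedged: assumes the save above
# succeeded; "stas/tiny-wmt19-en-de" is where the published copy lives):
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#   tiny = FSMTForConditionalGeneration.from_pretrained("tiny-wmt19-en-de")
#   tok = FSMTTokenizer.from_pretrained("tiny-wmt19-en-de")
#   out = tiny.generate(**tok(["Hello"], return_tensors="pt"))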
| 324
|
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
    """
    Generates a dataframe listing, for each model type, which of the PT/TF/Flax backends support it and which
    processing class it uses.
    """
    # Dictionary from model name prefix to model type.
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_mapping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's loop through all transformers objects (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update the table mapping model class to (pipeline tag, auto class) from the auto modules."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    """
    Update the metadata stored in the `huggingface/transformers-metadata` dataset.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check that `PIPELINE_TAGS_AND_AUTO_MODELS` covers every supported pipeline task."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
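
# Typical invocations (the token needs write access to the
# huggingface/transformers-metadata dataset; the flags are the ones defined above):
#
#   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>
#   python utils/update_metadata.py --check-only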
| 324
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
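
# For reference, a toy sketch of the lazy-import idea used above. The real
# implementation is transformers.utils._LazyModule; this simplified version is an
# assumption for illustration only, handling just a flat attribute -> submodule map:
#
#   import importlib
#   import types
#
#   class TinyLazyModule(types.ModuleType):
#       def __init__(self, name, attr_to_submodule):
#           super().__init__(name)
#           # e.g. {"Speech2TextConfig": ".configuration_speech_to_text"}
#           self._attr_to_submodule = attr_to_submodule
#
#       def __getattr__(self, attr):
#           # import the submodule only when the attribute is first requested
#           module = importlib.import_module(self._attr_to_submodule[attr], self.__name__)
#           return getattr(module, attr)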
| 33
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
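
# Example sessions (assuming the package is installed and exposes the
# `transformers-cli` entry point; command names come from the registrations above):
#
#   transformers-cli env
#   transformers-cli download bert-base-uncased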
| 231
| 0
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
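
# Shape summary for retrieve() in the distributed case (added comment; n = queries
# per worker, w = world size, d = n_docs, h = hidden size):
#   gather:   each worker sends (n, h); the main worker assembles (w*n, h)
#   retrieve: the main worker produces ids (w*n, d) and embeddings (w*n, d, h)
#   scatter:  each worker receives back its own (n, d) ids and (n, d, h) embeddings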
| 368
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 295
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
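

# A small usage sketch (the values are just the defaults above, not a released checkpoint):
#
#   config = FocalNetConfig(embed_dim=96, depths=[2, 2, 6, 2])
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']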
| 221
|
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """
    Calculate the built-in voltage of a pn-junction diode:
    V_bi = (kT / q) * ln(N_d * N_a / n_i^2)
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
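
    # Quick sanity check (hypothetical silicon-like values; concentrations in cm^-3,
    # the units cancel inside the log so only the ratio matters):
    # builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
    # returns roughly 0.81 V at T = 300 K.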
| 114
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
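

# Hedged usage sketch (the class name above is restored on the assumption that this
# is the AltCLIP processor, given the CLIP image processor + XLM-R tokenizer pairing;
# the checkpoint id is illustrative):
#
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")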
| 65
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        """
        Pads the 2D points and labels to the maximum number of points in the batch.
        """
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False) -> np.ndarray:
        """
        Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.
        """
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
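

# Usage sketch (the checkpoint id is a real SAM release; the point is illustrative):
#
#   from transformers import SamProcessor
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   inputs = processor(images=image, input_points=[[[450, 600]]], return_tensors="pt")
#   # inputs now carries pixel_values plus normalized, batched input_points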
| 65
| 1
|
'''simple docstring'''
def hamming(n_element: int) -> list:
    """
    Build the ordered list of the first n_element Hamming numbers
    (numbers of the form 2^i * 3^j * 5^k). n_element must be positive.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
| 93
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    # we verify the conversion on a COCO image
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """
    Copy/paste/tweak the original checkpoint's weights to our GLPN structure.
    """
    # load GLPN configuration (Segformer-B4 size)
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
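
# Example invocation (paths are illustrative):
#
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti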
| 60
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
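

# Small usage sketch (defaults as above; the language tuple is only the default,
# released checkpoints such as facebook/xmod-base ship with many more languages):
#
#   config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
#   print(config.languages)  # ['en_XX', 'de_DE']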
| 278
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """
    Log and save metrics for the given split.
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
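
# Typical launch (flags are illustrative; the full set comes from the dataclasses
# above plus Seq2SeqTrainingArguments):
#
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./wmt_en_ro \
#       --output_dir ./out --do_train --do_eval --task translation \
#       --predict_with_generate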
| 278
| 1
|
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update element i to val in O(log n) time."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Combine elements i..j with `fn` in O(log n) time."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Breadth-first traversal over the tree nodes."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7 (for operator.add)
        print(arr.query_range(2, 2))  # 5 (for operator.add)
        print(arr.query_range(1, 3))  # 13 (for operator.add)
        print()
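    # Extra sanity sketch (my addition, not in the original file): `fn` is generic, so
    # string concatenation works too; the query recombines the O(log n) stored partials.
    print(SegmentTree("abcde", lambda a, b: a + b).query_range(0, 4))  # abcde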
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # inserting at the head in reverse-sorted order yields an ascending list
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
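    # Expected output (my addition): the two tuples interleave into one sorted chain:
    # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10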
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # keep the backend normalizer consistent with the arguments passed here
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines
    the cumulative product of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
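
# A minimal denoising-loop sketch (my addition, not part of the original module): it shows
# the usual scale -> predict -> step cycle. The noise predictor is a stand-in lambda so the
# loop runs on its own; a real pipeline would call a trained UNet instead.
def _kdpm2_demo():
    scheduler = KDPM2DiscreteScheduler(beta_schedule="linear")
    scheduler.set_timesteps(num_inference_steps=25)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for a learned noise predictor
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample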
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """
    The Knuth-Morris-Pratt algorithm: find `pattern` in `text` in O(n + m) by
    precomputing, for each prefix of the pattern, how far to fall back on a mismatch.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Calculates the new index we should go to if we fail a comparison."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
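    # Worked trace (my addition): at j = 8 the mismatch 'b' vs 'a' falls back through
    # failure[4] = 2 and then failure[1] = 1 before 'a' matches at i = 1, so failure[8] = 2.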
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch GitHub info of the authenticated user using the requests module."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())


@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)

        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the electrical impedance formula Z = sqrt(R**2 + X**2): given any two of
    resistance (R), reactance (X) and impedance (Z), set exactly one argument to 0
    and it is computed from the other two.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
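    # Worked example (my addition): with R = 3 ohms and X = 4 ohms,
    # Z = sqrt(3**2 + 4**2) = 5.0, i.e. electrical_impedance(3, 4, 0) == {"impedance": 5.0}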
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines
    the cumulative product of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha**0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
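
# A minimal forward-diffusion sketch (my addition, not part of the original module):
# `add_noise` mixes a clean sample with Gaussian noise at the signal/noise ratio implied
# by each timestep, so small t keeps the signal and large t is mostly noise.
def _add_noise_demo():
    scheduler = UnCLIPScheduler(num_train_timesteps=1000)
    clean = torch.zeros(2, 3, 8, 8)
    noise = torch.randn(2, 3, 8, 8)
    t = torch.tensor([10, 900])  # one lightly-noised sample, one heavily-noised sample
    return scheduler.add_noise(clean, noise, t)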
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's Law (V = I * R): given any two of voltage, current and resistance,
    set exactly one argument to 0 and it is computed from the other two.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
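    # Worked example (my addition): with V = 10 volts and I = 2 amperes,
    # R = V / I = 5.0, i.e. ohms_law(10, 2, 0) == {"resistance": 5.0}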
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
from __future__ import annotations
from random import random
class Node:
    """Treap node: holds a value, a random priority, and two children."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split a subtree into (keys <= value, keys > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two subtrees where every key in `left` is <= every key in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
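
# A minimal non-interactive sketch (my addition, not in the original file): build a treap
# from 1..5, erase 3, then print the surviving keys in sorted order (expected: 1,2,4,5,).
def _demo() -> None:
    root = None
    for value in (1, 2, 3, 4, 5):
        root = insert(root, value)
    root = erase(root, 3)
    inorder(root)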
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __UpperCamelCase ( _A , _A ):
assert isinstance(_A , _A )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read()
_check_json_dataset(_A , _A )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase_ = features.copy() if features else default_expected_features
lowerCAmelCase_ = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
_check_json_dataset(_A , _A )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
lowerCAmelCase_ = features.copy() if features else default_expected_features
lowerCAmelCase_ = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
assert isinstance(_A , _A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __UpperCamelCase ( _A , _A ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
lowerCAmelCase_ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
lowerCAmelCase_ = features.copy()
lowerCAmelCase_ = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
assert isinstance(_A , _A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read()
_check_json_dataset(_A , _A )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def __UpperCamelCase(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read()
_check_json_datasetdict(_A , _A )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase_ = features.copy() if features else default_expected_features
lowerCAmelCase_ = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path} , features=_A , cache_dir=_A ).read()
_check_json_datasetdict(_A , _A )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCamelCase ( _A , _A , _A ):
if split:
lowerCAmelCase_ = {split: jsonl_path}
else:
lowerCAmelCase_ = '''train'''
lowerCAmelCase_ = {'''train''': jsonl_path, '''test''': jsonl_path}
lowerCAmelCase_ = tmp_path / '''cache'''
lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A ).read()
_check_json_datasetdict(_A , _A , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class A :
@pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__ ).write()
buffer.seek(0 )
lowerCAmelCase_ = load_json_function(UpperCamelCase__ )
assert isinstance(UpperCamelCase__, UpperCamelCase__ )
assert isinstance(exported_content[0], UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''', [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
], )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, orient=UpperCamelCase__ ).write()
buffer.seek(0 )
lowerCAmelCase_ = load_json(UpperCamelCase__ )
assert isinstance(UpperCamelCase__, UpperCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase__ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, num_proc=2 ).write()
buffer.seek(0 )
lowerCAmelCase_ = load_json_function(UpperCamelCase__ )
assert isinstance(UpperCamelCase__, UpperCamelCase__ )
assert isinstance(exported_content[0], UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''', [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
], )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, orient=UpperCamelCase__, num_proc=2 ).write()
buffer.seek(0 )
lowerCAmelCase_ = load_json(UpperCamelCase__ )
assert isinstance(UpperCamelCase__, UpperCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase__ ) == 10
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
with pytest.raises(UpperCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, num_proc=0 )
@pytest.mark.parametrize('''compression, extension''', [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = tmp_path_factory.mktemp('''data''' ) / f"test.json.{extension}"
lowerCAmelCase_ = str(shared_datadir / f"test_file.json.{extension}" )
JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, compression=UpperCamelCase__ ).write()
with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f:
lowerCAmelCase_ = f.read()
with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f:
lowerCAmelCase_ = f.read()
assert exported_content == original_content
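# A minimal usage sketch of the public API these tests exercise: `Dataset.to_json`
# and `Dataset.from_json` are the user-facing wrappers around JsonDatasetWriter and
# JsonDatasetReader. The path below is illustrative.
def _json_round_trip_demo(path="data.jsonl"):
    from datasets import Dataset

    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
    ds.to_json(path, lines=True)  # one JSON object per line
    round_tripped = Dataset.from_json(path)
    assert round_tripped.column_names == ["col_1", "col_2", "col_3"]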
| 278
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__snake_case ={
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class ErnieMConfig(PretrainedConfig):
    model_type: str = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
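# A brief usage sketch (override values are illustrative): hyperparameters can be
# overridden at construction time, and `attribute_map` lets the legacy `dropout`
# attribute resolve to `classifier_dropout`.
#
#   config = ErnieMConfig(hidden_size=512, num_hidden_layers=8)
#   assert config.hidden_size == 512
#   assert config.dropout == config.classifier_dropout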
| 354
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
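# A minimal, self-contained illustration of the deferred-import idea used above
# (PEP 562 module-level __getattr__); a sketch, not transformers' actual
# _LazyModule implementation:
#
#   import importlib
#
#   _LAZY = {"SwiftFormerModel": ".modeling_swiftformer"}
#
#   def __getattr__(name):
#       if name in _LAZY:
#           return getattr(importlib.import_module(_LAZY[name], __name__), name)
#       raise AttributeError(name)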
| 55
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __snake_case ( unittest.TestCase ):
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
SCREAMING_SNAKE_CASE__ = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
SCREAMING_SNAKE_CASE__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
SCREAMING_SNAKE_CASE__ = {"""unk_token""": """<unk>"""}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_lowercase ) )
SCREAMING_SNAKE_CASE__ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"""image_std""": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , _lowercase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_lowercase , _lowercase )
def __a ( self : Union[str, Any] , **_lowercase : Optional[Any] ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def __a ( self : Optional[int] , **_lowercase : Tuple ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase )
def __a ( self : Union[str, Any] , **_lowercase : Any ):
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowercase )
def __a ( self : Dict ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __a ( self : Any ):
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase )
SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowercase )
self.assertIsInstance(processor_fast.tokenizer , _lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowercase )
self.assertIsInstance(processor_fast.image_processor , _lowercase )
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(_lowercase , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = processor(images=_lowercase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
SCREAMING_SNAKE_CASE__ = """lower newer"""
SCREAMING_SNAKE_CASE__ = processor(text=_lowercase )
SCREAMING_SNAKE_CASE__ = tokenizer(_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
SCREAMING_SNAKE_CASE__ = """lower newer"""
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(_lowercase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
SCREAMING_SNAKE_CASE__ = """lower newer"""
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
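# A short usage sketch of the processor under test; "openai/clip-vit-base-patch32"
# is the public checkpoint, and the image is synthetic.
def _clip_processor_demo():
    from PIL import Image
    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.fromarray(np.zeros((30, 30, 3), dtype=np.uint8))
    inputs = processor(text=["a photo of a cat"], images=image, return_tensors="np")
    return sorted(inputs.keys())  # ["attention_mask", "input_ids", "pixel_values"]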
| 219
|
import sys
from collections import defaultdict


class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp_pos = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_pos
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prims_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prims_algorithm(adjacency_list))
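# A worked example (weights are illustrative): for the 4-vertex graph with edges
# (0,1,w=1), (0,2,w=3), (1,2,w=1), (1,3,w=4), (2,3,w=2), growing the tree from
# vertex 0 picks edges (0,1), (1,2), (2,3) for a total weight of 1 + 1 + 2 = 4:
#
#   graph = defaultdict(list)
#   for u, v, w in [(0, 1, 1), (0, 2, 3), (1, 2, 1), (1, 3, 4), (2, 3, 2)]:
#       graph[u].append([v, w])
#       graph[v].append([u, w])
#   assert prims_algorithm(graph) == [(0, 1), (1, 2), (2, 3)]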
| 339
| 0
|
'''simple docstring'''

from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x_coordinates = [i[0] for i in self.list_of_points]
        y_coordinates = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x_coordinates, y_coordinates, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
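# A quick numeric check (no plotting needed): a degree-1 Bezier curve is plain
# linear interpolation, so the midpoint of the segment (1, 1)-(3, 3) is (2.0, 2.0):
#
#   curve = BezierCurve([(1, 1), (3, 3)])
#   assert curve.basis_function(0) == [1.0, 0.0]
#   assert curve.bezier_curve_function(0.5) == (2.0, 2.0)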
| 21
|
'''simple docstring'''

import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
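# Typical usage (sketch; `run_interactive_menu` is a hypothetical caller): keep the
# terminal cursor hidden for the duration of a block, restoring it even on error.
#
#   with hide():
#       run_interactive_menu()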
| 21
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowercase__ :
'''simple docstring'''
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=32 , __snake_case=2 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=16 , __snake_case=2 , __snake_case=0.02 , __snake_case=3 , __snake_case=4 , __snake_case=None , __snake_case=1000 , ):
_SCREAMING_SNAKE_CASE : Any = parent
_SCREAMING_SNAKE_CASE : Dict = batch_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
_SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
_SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask
_SCREAMING_SNAKE_CASE : Any = use_token_type_ids
_SCREAMING_SNAKE_CASE : List[str] = use_labels
_SCREAMING_SNAKE_CASE : Any = vocab_size
_SCREAMING_SNAKE_CASE : Tuple = hidden_size
_SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
_SCREAMING_SNAKE_CASE : Dict = num_attention_heads
_SCREAMING_SNAKE_CASE : Tuple = intermediate_size
_SCREAMING_SNAKE_CASE : Dict = hidden_act
_SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : str = max_position_embeddings
_SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
_SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : Dict = num_labels
_SCREAMING_SNAKE_CASE : int = num_choices
_SCREAMING_SNAKE_CASE : Any = scope
_SCREAMING_SNAKE_CASE : str = range_bbox
def UpperCAmelCase_ ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : List[str] = TFLayoutLMModel(config=__snake_case )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(__snake_case , __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
_SCREAMING_SNAKE_CASE : Tuple = model(__snake_case , __snake_case , token_type_ids=__snake_case )
_SCREAMING_SNAKE_CASE : str = model(__snake_case , __snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Any = TFLayoutLMForMaskedLM(config=__snake_case )
_SCREAMING_SNAKE_CASE : str = model(__snake_case , __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : str = self.num_labels
_SCREAMING_SNAKE_CASE : Dict = TFLayoutLMForSequenceClassification(config=__snake_case )
_SCREAMING_SNAKE_CASE : Optional[int] = model(__snake_case , __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Tuple = self.num_labels
_SCREAMING_SNAKE_CASE : List[str] = TFLayoutLMForTokenClassification(config=__snake_case )
_SCREAMING_SNAKE_CASE : Dict = model(__snake_case , __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Optional[int] = TFLayoutLMForQuestionAnswering(config=__snake_case )
_SCREAMING_SNAKE_CASE : Any = model(__snake_case , __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class lowercase__ ( _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
A_ : Any = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
A_ : Union[str, Any] = (
{
"""feature-extraction""": TFLayoutLMModel,
"""fill-mask""": TFLayoutLMForMaskedLM,
"""text-classification""": TFLayoutLMForSequenceClassification,
"""token-classification""": TFLayoutLMForTokenClassification,
"""zero-shot""": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : Union[str, Any] = False
A_ : int = True
A_ : Optional[int] = 10
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = TFLayoutLMModelTester(self )
_SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
@slow
def UpperCAmelCase_ ( self ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE : int = TFLayoutLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip("""Onnx compliancy broke with TF 2.10""" )
def UpperCAmelCase_ ( self ):
pass
def prepare_layoutlm_batch_inputs():
    """simple docstring"""
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase_ ( self ):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_pooled = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_pooled, atol=1e-3))
@slow
def UpperCAmelCase_ ( self ):
# initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)
@slow
def UpperCAmelCase_ ( self ):
# initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)
@slow
def UpperCAmelCase_ ( self ):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
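# Note on the bbox inputs used above: LayoutLM expects word boxes as
# (x0, y0, x1, y1) coordinates normalized to a 0-1000 grid. A helper of this
# shape is commonly used to build them (sketch; `width`/`height` are the page
# dimensions):
#
#   def normalize_bbox(bbox, width, height):
#       return [
#           int(1000 * bbox[0] / width),
#           int(1000 * bbox[1] / height),
#           int(1000 * bbox[2] / width),
#           int(1000 * bbox[3] / height),
#       ]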
| 200
|
'''simple docstring'''

import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 200
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _A :
"""simple docstring"""
def __init__( self : int , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any]=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Any=True , __UpperCAmelCase : Dict=True , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Optional[Any]=99 , __UpperCAmelCase : str=[1, 1, 2] , __UpperCAmelCase : Tuple=1 , __UpperCAmelCase : int=32 , __UpperCAmelCase : List[Any]=4 , __UpperCAmelCase : List[Any]=8 , __UpperCAmelCase : Optional[int]=37 , __UpperCAmelCase : Tuple="gelu_new" , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Optional[int]=0.0 , __UpperCAmelCase : List[Any]=512 , __UpperCAmelCase : str=3 , __UpperCAmelCase : Any=0.02 , __UpperCAmelCase : List[str]=3 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Tuple=False , ):
a : Any = parent
a : Tuple = batch_size
a : List[str] = seq_length
a : Any = is_training
a : Union[str, Any] = use_input_mask
a : Dict = use_token_type_ids
a : Tuple = use_labels
a : Union[str, Any] = vocab_size
a : str = block_sizes
a : str = num_decoder_layers
a : List[Any] = d_model
a : Optional[Any] = n_head
a : Optional[Any] = d_head
a : Any = d_inner
a : Optional[int] = hidden_act
a : List[Any] = hidden_dropout
a : Dict = attention_dropout
a : Tuple = activation_dropout
a : int = max_position_embeddings
a : Optional[int] = type_vocab_size
a : str = 2
a : Optional[int] = num_labels
a : List[Any] = num_choices
a : List[Any] = scope
a : str = initializer_std
# Used in the tests to check the size of the first attention layer
a : Tuple = n_head
# Used in the tests to check the size of the first hidden state
a : Union[str, Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
a : str = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
a : str = self.num_hidden_layers + 2
def __snake_case ( self : Union[str, Any]):
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : Dict = None
if self.use_input_mask:
a : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Tuple = None
if self.use_token_type_ids:
a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a : Any = None
a : Tuple = None
a : Optional[Any] = None
if self.use_labels:
a : int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : int = ids_tensor([self.batch_size] , self.num_choices)
a : Any = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : str , ):
a : Optional[int] = TFFunnelModel(config=__UpperCAmelCase)
a : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a : int = model(__UpperCAmelCase)
a : List[str] = [input_ids, input_mask]
a : Optional[int] = model(__UpperCAmelCase)
a : List[Any] = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
a : Optional[int] = False
a : Dict = TFFunnelModel(config=__UpperCAmelCase)
a : Any = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
a : List[str] = False
a : Optional[Any] = TFFunnelModel(config=__UpperCAmelCase)
a : int = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] , ):
a : str = TFFunnelBaseModel(config=__UpperCAmelCase)
a : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a : Optional[int] = model(__UpperCAmelCase)
a : int = [input_ids, input_mask]
a : Any = model(__UpperCAmelCase)
a : Any = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
a : List[Any] = False
a : int = TFFunnelBaseModel(config=__UpperCAmelCase)
a : List[str] = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
a : Union[str, Any] = False
a : Optional[int] = TFFunnelBaseModel(config=__UpperCAmelCase)
a : List[Any] = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def __snake_case ( self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : int , ):
a : Tuple = TFFunnelForPreTraining(config=__UpperCAmelCase)
a : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a : Optional[Any] = model(__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def __snake_case ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Any , ):
a : Union[str, Any] = TFFunnelForMaskedLM(config=__UpperCAmelCase)
a : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a : Dict = model(__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __snake_case ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , ):
a : Union[str, Any] = self.num_labels
a : List[Any] = TFFunnelForSequenceClassification(config=__UpperCAmelCase)
a : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a : Union[str, Any] = model(__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , ):
a : Dict = self.num_choices
a : Optional[int] = TFFunnelForMultipleChoice(config=__UpperCAmelCase)
a : str = tf.tile(tf.expand_dims(__UpperCAmelCase , 1) , (1, self.num_choices, 1))
a : Optional[Any] = tf.tile(tf.expand_dims(__UpperCAmelCase , 1) , (1, self.num_choices, 1))
a : Union[str, Any] = tf.tile(tf.expand_dims(__UpperCAmelCase , 1) , (1, self.num_choices, 1))
a : Optional[int] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
a : List[str] = model(__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __snake_case ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , ):
a : Union[str, Any] = self.num_labels
a : Union[str, Any] = TFFunnelForTokenClassification(config=__UpperCAmelCase)
a : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a : Optional[int] = model(__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , ):
a : Dict = TFFunnelForQuestionAnswering(config=__UpperCAmelCase)
a : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a : int = model(__UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __snake_case ( self : Any):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _A ( _a ,_a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : Optional[int] = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase : Optional[int] = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase : List[str] = False
UpperCAmelCase : Any = False
def __snake_case ( self : Dict):
a : Optional[int] = TFFunnelModelTester(self)
a : str = ConfigTester(self , config_class=__UpperCAmelCase)
def __snake_case ( self : List[Any]):
self.config_tester.run_common_tests()
def __snake_case ( self : Optional[int]):
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def __snake_case ( self : Tuple):
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase)
def __snake_case ( self : List[Any]):
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase)
def __snake_case ( self : Tuple):
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)
def __snake_case ( self : List[Any]):
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)
@require_tf
class _A ( _a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : List[Any] = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
UpperCAmelCase : Tuple = False
UpperCAmelCase : Dict = False
def __snake_case ( self : Union[str, Any]):
a : Optional[Any] = TFFunnelModelTester(self , base=__UpperCAmelCase)
a : int = ConfigTester(self , config_class=__UpperCAmelCase)
def __snake_case ( self : Optional[int]):
self.config_tester.run_common_tests()
def __snake_case ( self : Tuple):
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__UpperCAmelCase)
def __snake_case ( self : List[str]):
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase)
def __snake_case ( self : Union[str, Any]):
a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase)
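# Back-of-the-envelope sketch of why the base-model checks above expect pooled
# sequence lengths: the funnel encoder roughly halves the sequence at each block
# boundary (the exact length also depends on the separate-CLS and truncation
# settings), so for the tester's seq_length=7 and block_sizes=[1, 1, 2]:
#
#   import math
#   length = 7
#   for _ in range(len([1, 1, 2]) - 1):  # one pooling step per block boundary
#       length = math.ceil(length / 2)   # 7 -> 4 -> 2
#   assert length == 2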
| 364
|
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
    # used during training (even though we don't have a training script for these models yet)
    """Mask2FormerConfig""": ["""ignore_value"""],
    # `ignore_value` used during training (even though we don't have a training script for these models yet)
    # `norm` used in the conversion script (despite not being used in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings) -> bool:
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
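# Illustration (not part of the original script): for `attribute = "hidden_size"`, any of
# `config.hidden_size`, `getattr(config, "hidden_size", ...)`, or a multi-line
# `getattr(\n    self.config,\n    "hidden_size"` occurrence in a modeling file marks the
# attribute as used by the checks above.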
def check_config_attributes_being_used(config_class) -> list:
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 226
| 0
|
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    """Iteratively sum the digits of |n|."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the digits of |n|."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Sum the digits of |n| via its string representation."""
    return sum(int(c) for c in str(abs(n)))
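# Quick sanity check (illustrative only, not part of the original file): all three
# implementations agree on the benchmark inputs below, e.g.
# sum_of_digits(262144) == sum_of_digits_recursion(262144) == sum_of_digits_compact(262144)
# == 2 + 6 + 2 + 1 + 4 + 4 == 19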
def benchmark():
    """Benchmark the three digit-sum implementations on increasingly large ints."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 37
|
'''simple docstring'''
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """
    Liouville's lambda function: 1 if `number` has an even number of prime factors
    (counted with multiplicity), -1 if it has an odd number.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
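# Worked example (illustrative, not part of the original file):
# prime_factors(12) == [2, 2, 3] -> three factors (odd)  -> liouville_lambda(12) == -1
# prime_factors(10) == [2, 5]    -> two factors (even)   -> liouville_lambda(10) == 1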
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37
| 1
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
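# For illustration (not part of the original file): a marker comment such as
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->Block
# is matched by `_re_copy_warning`; the trailing "with BasicTransformerBlock->Block" part is
# then split on commas and parsed by `_re_replace_pattern` into (old, new, option) groups below.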
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 361
|
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
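    # For example (illustrative, not from the original): with num = 8051, seed = 2 and
    # step = 1, the sequence x -> (x**2 + 1) % 8051 starts 2, 5, 26, 677, ... and the
    # gcd check below eventually yields the nontrivial factor 97 (8051 = 83 * 97).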
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
| 190
| 0
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 24
|
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm to generate a Minimum Spanning Tree (MST) of the graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
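# A minimal usage sketch (illustrative values, not part of the original file):
# g = GraphUndirectedWeighted[int]()
# g.add_edge(1, 2, 1)
# g.add_edge(2, 3, 2)
# g.add_edge(1, 3, 10)
# mst = g.kruskal()
# mst.connections then keeps only the cheap edges (1-2, weight 1) and (2-3, weight 2);
# the weight-10 edge is discarded because 1 and 3 are already connected through 2.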
| 55
| 0
|
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 102
|
from sklearn.metrics import mean_squared_error
import datasets
UpperCamelCase__ = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
UpperCamelCase__ = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
UpperCamelCase__ = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 102
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 21
|
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """
    Takes a list of side lengths and determines whether a polygon with those
    side lengths can exist (the longest side must be shorter than the sum of the rest).
    """
    if len(nums) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space')
    if any(i <= 0 for i in nums):
        raise ValueError('All values must be greater than 0')
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
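# Quick illustration (not part of the original file):
# check_polygon([6, 10, 5])    -> True,  since the longest side 10 < 6 + 5
# check_polygon([3, 7, 13, 2]) -> False, since 13 >= 3 + 7 + 2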
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = 'mobilenet_v1'

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("""pixel_values""", {0: """batch"""})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("""logits""", {0: """batch"""})])
        else:
            return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 34
|
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R'''\b(a|an|the)\b''', re.UNICODE)
OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""")
    parser.add_argument("""data_file""", metavar="""data.json""", help="""Input data JSON file.""")
    parser.add_argument("""pred_file""", metavar="""pred.json""", help="""Model predictions.""")
    parser.add_argument(
        """--out-file""", """-o""", metavar="""eval.json""", help="""Write accuracy metrics to file (default is stdout).""")
    parser.add_argument(
        """--na-prob-file""", """-n""", metavar="""na_prob.json""", help="""Model estimates of probability of no answer.""")
    parser.add_argument(
        """--na-prob-thresh""", """-t""", type=float, default=1.0, help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""", )
    parser.add_argument(
        """--out-image-dir""", """-p""", metavar="""out_images""", default=None, help="""Save precision-recall curves to directory.""")
    parser.add_argument("""--verbose""", """-v""", action="""store_true""")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["""answers"""]["""text"""])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(""" """, text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
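# Worked example (illustrative, not part of the original script): for the gold answer
# "the cat sat" and the prediction "cat sat down", normalization drops the article, so
# gold tokens are ["cat", "sat"] and prediction tokens are ["cat", "sat", "down"].
# Then precision = 2/3, recall = 2/2 = 1.0, and F1 = 2 * (2/3) * 1.0 / ((2/3) + 1.0) = 0.8.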
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["""id"""]
                gold_answers = [t for t in qa["""answers"""]["""text"""] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""""""]
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("""exact""", 100.0 * sum(exact_scores.values()) / total),
                ("""f1""", 100.0 * sum(f1_scores.values()) / total),
                ("""total""", total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("""f1""", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("""total""", total),
            ])
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="""b""", alpha=0.2, where="""post""")
    plt.fill_between(recalls, precisions, step="""post""", alpha=0.2, color="""b""")
    plt.xlabel("""Recall""")
    plt.ylabel("""Precision""")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, """pr_exact.png"""), title="""Precision-Recall curve for Exact Match score""", )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, """pr_f1.png"""), title="""Precision-Recall curve for F1 score""", )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, """pr_oracle.png"""), title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""", )
    merge_eval(main_eval, pr_exact, """pr_exact""")
    merge_eval(main_eval, pr_f1, """pr_f1""")
    merge_eval(main_eval, pr_oracle, """pr_oracle""")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("""Model probability of no-answer""")
    plt.ylabel("""Proportion of dataset""")
    plt.title(f'''Histogram of no-answer probability: {name}''')
    plt.savefig(os.path.join(image_dir, f'''na_prob_hist_{name}.png'''))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["""best_exact"""] = best_exact
    main_eval["""best_exact_thresh"""] = exact_thresh
    main_eval["""best_f1"""] = best_f1
    main_eval["""best_f1_thresh"""] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["""data"""]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, """HasAns""")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, """NoAns""")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, """hasAns""")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, """noAns""")
    if OPTS.out_file:
        with open(OPTS.out_file, """w""") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 34
| 1
|
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 25
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, '''width_multiplier'''))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='''MobileViTV2 does not output attentions''')
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
import math


def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
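

# A closed-form cross-check (illustrative, not part of the original solution):
# sum of squares = n(n + 1)(2n + 1) / 6 and square of sum = (n(n + 1) / 2) ** 2,
# so the same answer is available in O(1).
def solution_closed_form(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares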
if __name__ == "__main__":
print(F'{solution() = }')
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    """Calculate the surface area of a cube."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Calculate the surface area of a cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Calculate the surface area of a sphere."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Calculate the surface area of a hemisphere, including its base."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Calculate the surface area of a cone."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Calculate the surface area of a conical frustum."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Calculate the surface area of a cylinder."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Calculate the surface area of a torus: 4 * pi^2 * R * r."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Calculate the area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Calculate the area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Calculate the area of a triangle given its base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Calculate the area of a triangle from its three sides using Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area
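

# Quick numeric check (illustrative, not part of the original module): for the
# 3-4-5 right triangle the semi-perimeter is 6, so Heron's formula gives
# sqrt(6 * 3 * 2 * 1) = 6.0 -- the same as area_triangle(3, 4).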
def area_parallelogram(base: float, height: float) -> float:
    """Calculate the area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Calculate the area of a trapezium."""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Calculate the area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Calculate the area of an ellipse."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Calculate the area of a rhombus from its two diagonals."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Calculate the area of a regular polygon with ``sides`` sides of ``length``."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
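

# Worked example (illustrative, not part of the original module): a regular
# hexagon with side length 10 has area 6 * 10**2 / (4 * tan(pi / 6))
# = 600 / (4 * 0.57735...) ≈ 259.81.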
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
print(F'Reqular Pentagon: {area_reg_polygon(5, 10) = }')
def jaro_winkler(str_1: str, str_2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings."""

    def get_matched_characters(_str_1: str, _str_2: str) -> str:
        matched = []
        limit = min(len(_str_1), len(_str_2)) // 2
        for i, char in enumerate(_str_1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str_2)))
            if char in _str_2[left:right]:
                matched.append(char)
                _str_2 = f"{_str_2[0:_str_2.index(char)]} {_str_2[_str_2.index(char) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str_1, str_2)
    matching_2 = get_matched_characters(str_2, str_1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_1)
                + match_count / len(str_2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str_1[:4], str_2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
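

# Worked example (hedged; traced by hand through the implementation above):
# for the classic pair "martha"/"marhta" there are 6 matching characters and
# one transposition, giving jaro = (6/6 + 6/6 + 5/6) / 3 ≈ 0.9444; with the
# shared 3-character prefix "mar" the final score is
# 0.9444 + 0.1 * 3 * (1 - 0.9444) ≈ 0.9611.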
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
"""RoBERTa-PreLayerNorm configuration"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
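

# Minimal usage sketch (illustrative; the constructor call below follows the
# transformers OnnxConfig API as I understand it and is an assumption, not
# part of the original file):
#   config = RobertaPreLayerNormConfig()
#   onnx_config = RobertaPreLayerNormOnnxConfig(config, task="default")
#   list(onnx_config.inputs)  # -> ["input_ids", "attention_mask"]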
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
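

# Example of the resulting mapping (hedged; traced by hand through the
# function above, the key itself is illustrative): the original key
# "patch_embed.0.weight" becomes "efficientformer.patch_embed.convolution1.weight".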
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
_A = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
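

# Example of the mapping in action (hedged; traced by hand through
# WHISPER_MAPPING above, the key itself is illustrative):
#   "encoder.blocks.0.mlp.0.weight" -> "encoder.layers.0.fc1.weight"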
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
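

# Why this works (explanatory note, not in the original): assigning
# `emb.weight.data` ties the projection to the token embedding, so the
# returned layer maps decoder hidden states to vocabulary logits with shared
# weights. A quick shape check under that assumption:
#   emb = nn.Embedding(8, 4)
#   lin = make_linear_from_emb(emb)
#   tuple(lin.weight.shape) == (8, 4) and lin.bias is None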
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: `root` defaulting to the current directory is an assumption made
    # here so the single-argument call below works; the original default is
    # not recoverable from this copy.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_A = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for the Semantic Stable Diffusion pipeline.
    """

    # The two field annotations below are reconstructed from the BaseOutput
    # convention used by the other Stable Diffusion pipelines; the obfuscated
    # copy only had placeholders here.
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run distributed evaluation for one rank and save its predictions to save_dir/rank_{rank}_output.json."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        data_dir,
        tokenizer,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )

    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            pseudolabel_results = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {pseudolabel_results}, intermediate in {json_save_dir}/")
            save_json(results, pseudolabel_results)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate the partial results from each rank into one list, sorted by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # wait until every rank has written its rank_*.json file, up to `timeout` seconds
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
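
# Example invocation (hedged: the script filename and flag values are
# illustrative assumptions, but the flags themselves are defined in
# run_generate above, and --local_rank is supplied by the launcher):
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir xsum --save_dir tmp_gen --bs 16 --fp16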
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
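

# Worked example (illustrative, not part of the original file):
# 25 -> "11001" and 32 -> "100000"; zero-filled to width 6 and OR-ed bitwise:
# binary_or(25, 32) == "0b111001"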
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
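

# Expected input format (hedged; inferred from the ";"-separated split in
# main() below, the concrete paths and names are illustrative): each line of
# the "correct" file looks like
#   tests/models/foo/test_modeling_foo.py;FooModelTest;test_inference;expected_slice = torch.tensor([...])
# where the last field is the full replacement line.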
def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)