# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    """Creates a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """DDIMInverseScheduler is the reverse scheduler of `DDIMScheduler`."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just outputs the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # The inverse DDIM scheduler does not rescale the model input.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
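
# A minimal usage sketch of the inverse scheduler above. The random tensor is a
# stand-in for a real UNet's noise prediction and the shapes are illustrative
# assumptions, not part of the scheduler itself.
if __name__ == "__main__":
    inverse_scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    inverse_scheduler.set_timesteps(50)

    latents = torch.randn(1, 4, 64, 64)
    for t in inverse_scheduler.timesteps:
        noise_pred = torch.randn_like(latents)  # replace with unet(latents, t).sample
        latents = inverse_scheduler.step(noise_pred, int(t), latents).prev_sample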
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
__a = "base_with_context"
def load_notes_encoder(weights, model):
    # Target attribute paths follow the module layout of diffusers' SpectrogramNotesEncoder.
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    # Target attribute paths follow the module layout of diffusers' SpectrogramContEncoder.
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    # Target attribute paths follow the module layout of diffusers' T5FilmDecoder.
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=F"{MODEL}/checkpoint_500000",
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
args = parser.parse_args()
main(args)
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path, output_path):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """Configuration class for a generic encoder-decoder model."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
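
# A minimal sketch of composing an EncoderDecoderConfig from two sub-configs.
# The config classes chosen here are illustrative; any encoder/decoder pair works.
if __name__ == "__main__":
    from transformers import BertConfig, GPT2Config

    config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), GPT2Config())
    assert config.decoder.is_decoder and config.decoder.add_cross_attention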
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIP tokenizer that maps one placeholder token to several learned embedding tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
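
# A minimal usage sketch of the tokenizer above; the placeholder string and
# checkpoint name are illustrative assumptions.
if __name__ == "__main__":
    tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
    # "<cat-toy>" expands to "<cat-toy>_0 ... <cat-toy>_3" before encoding.
    ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)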
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import transformers dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
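
# For reference, `ContextManagers` simply enters the given context managers in
# order; a standalone sketch using the helpers defined above:
if __name__ == "__main__":
    with ContextManagers([context_fr(), context_en()]):
        print("Transformers are awesome!")
    # Prints: Bonjour! / Welcome! / Transformers are awesome! / Bye! / Au revoir!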
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """Utility class holding a conversation and its history for `ConversationalPipeline`."""

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
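
# A minimal sketch of the Conversation / ConversationalPipeline round trip.
# The checkpoint name is illustrative; any conversational model should work.
if __name__ == "__main__":
    from transformers import pipeline

    chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
    conversation = Conversation("Going to the movies tonight - any suggestions?")
    conversation = chatbot(conversation)
    print(conversation.generated_responses[-1])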
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_latent_upscaler(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class lowercase :
'''simple docstring'''
def __init__( self , _A , _A , _A = None , _A = None , **_A , ) -> int:
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''')
_UpperCAmelCase : Optional[int] = dataset
_UpperCAmelCase : Union[str, Any] = path_or_buf
_UpperCAmelCase : Dict = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_UpperCAmelCase : Optional[int] = num_proc
_UpperCAmelCase : Optional[int] = '''utf-8'''
_UpperCAmelCase : Any = to_json_kwargs
    def write( self ):
        """Validates the kwargs and dispatches to `_write`, opening the target path if needed."""
        _ = self.to_json_kwargs.pop('path_or_buf' , None)
        orient = self.to_json_kwargs.pop('orient' , 'records')
        lines = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False)
        index = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True)
        compression = self.to_json_kwargs.pop('compression' , None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f'`datasets` currently does not support {compression} compression')
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf , 'wb' , compression=compression) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    ' was passed. Please provide a local path instead.')
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs)
        return written
    def _batch_json( self , args ):
        """Converts one batch of the dataset to a JSON-lines byte string."""
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs)
        if not json_str.endswith('\n'):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write( self , file_obj , orient , lines , index , **to_json_kwargs , ):
        """Writes the pyarrow table as JSON lines to the binary file handle `file_obj`."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset) , self.batch_size) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
                    written += file_obj.write(json_str)
        return written
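# A standalone sketch of the progress-bar total used in `_write` above: the
# expression is just ceiling division of the row count by the batch size.
# `num_batches` is a hypothetical helper for illustration, not part of datasets.
import math

def num_batches(num_rows: int, batch_size: int) -> int:
    return (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size

assert num_batches(10, 4) == 3 == math.ceil(10 / 4)
assert num_batches(8, 4) == 2 == math.ceil(8 / 4)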
| 702 |
import unittest
from knapsack import knapsack as k
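# The `knapsack` module under test is not shipped in this file. A minimal
# recursive 0/1-knapsack sketch consistent with the calls below (signature
# knapsack(capacity, weights, values, counter)) would look like this:
def _reference_knapsack(capacity, weights, values, counter):
    if counter == 0 or capacity == 0:
        return 0
    if weights[counter - 1] > capacity:
        # the item does not fit, so skip it
        return _reference_knapsack(capacity, weights, values, counter - 1)
    # otherwise take the better of including or excluding the item
    return max(
        values[counter - 1] + _reference_knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        _reference_knapsack(capacity, weights, values, counter - 1),
    )
assert _reference_knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220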
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def test_base_case( self ):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 0)
    def test_easy_case( self ):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 5)
    def test_knapsack( self ):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 220)
if __name__ == "__main__":
unittest.main()
| 186 | 0 |
'''simple docstring'''
def __lowerCamelCase ( txt ) -> list:
    """Return one copy of `txt` per alphabetic character, with that character uppercased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt) )
        if txt[a].isalpha()
    ]
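# Usage sketch for the helper above: one variant is produced per alphabetic
# character, with that character uppercased (the digit "1" is skipped).
assert __lowerCamelCase("ab1c") == ["Ab1c", "aB1c", "ab1C"]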
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 368 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
def _snake_case ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _snake_case ( self ) -> str:
snake_case__ = 1
snake_case__ = 3
snake_case__ = (32, 32)
snake_case__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
@property
def _snake_case ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
snake_case__ = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=UpperCamelCase_ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def _snake_case ( self ) -> Optional[Any]:
torch.manual_seed(0 )
snake_case__ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def _snake_case ( self ) -> List[str]:
torch.manual_seed(0 )
snake_case__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
return CLIPTextModel(UpperCamelCase_ )
def _snake_case ( self ) -> Union[str, Any]:
snake_case__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ = self.dummy_cond_unet_upscale
snake_case__ = DDPMScheduler()
snake_case__ = DDIMScheduler(prediction_type='v_prediction' )
snake_case__ = self.dummy_vae
snake_case__ = self.dummy_text_encoder
snake_case__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case__ = Image.fromarray(np.uint8(UpperCamelCase_ ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
snake_case__ = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
snake_case__ = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
snake_case__ = 'A painting of a squirrel eating a burger'
snake_case__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
snake_case__ = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
snake_case__ = output.images
snake_case__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
snake_case__ = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=UpperCamelCase_ , )[0]
snake_case__ = image[0, -3:, -3:, -1]
snake_case__ = image_from_tuple[0, -3:, -3:, -1]
snake_case__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
snake_case__ = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> List[str]:
snake_case__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ = self.dummy_cond_unet_upscale
snake_case__ = DDPMScheduler()
snake_case__ = DDIMScheduler(prediction_type='v_prediction' )
snake_case__ = self.dummy_vae
snake_case__ = self.dummy_text_encoder
snake_case__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case__ = Image.fromarray(np.uint8(UpperCamelCase_ ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
snake_case__ = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
snake_case__ = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
snake_case__ = 'A painting of a squirrel eating a burger'
snake_case__ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
snake_case__ = output.images
assert image.shape[0] == 2
snake_case__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
snake_case__ = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
snake_case__ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _snake_case ( self ) -> str:
snake_case__ = self.dummy_cond_unet_upscale
snake_case__ = DDPMScheduler()
snake_case__ = DDIMScheduler(prediction_type='v_prediction' )
snake_case__ = self.dummy_vae
snake_case__ = self.dummy_text_encoder
snake_case__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case__ = Image.fromarray(np.uint8(UpperCamelCase_ ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
snake_case__ = unet.half()
snake_case__ = text_encoder.half()
# make sure here that pndm scheduler skips prk
snake_case__ = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
snake_case__ = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
snake_case__ = 'A painting of a squirrel eating a burger'
snake_case__ = torch.manual_seed(0 )
snake_case__ = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='np' , ).images
snake_case__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def _snake_case ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> Optional[int]:
snake_case__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
snake_case__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
snake_case__ = 'stabilityai/stable-diffusion-x4-upscaler'
snake_case__ = StableDiffusionUpscalePipeline.from_pretrained(UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
snake_case__ = 'a cat sitting on a park bench'
snake_case__ = torch.manual_seed(0 )
snake_case__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='np' , )
snake_case__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def _snake_case ( self ) -> List[Any]:
snake_case__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
snake_case__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
snake_case__ = 'stabilityai/stable-diffusion-x4-upscaler'
snake_case__ = StableDiffusionUpscalePipeline.from_pretrained(
            UpperCamelCase_ , torch_dtype=torch.float16 , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
snake_case__ = 'a cat sitting on a park bench'
snake_case__ = torch.manual_seed(0 )
snake_case__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='np' , )
snake_case__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _snake_case ( self ) -> int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
snake_case__ = 'stabilityai/stable-diffusion-x4-upscaler'
snake_case__ = StableDiffusionUpscalePipeline.from_pretrained(
            UpperCamelCase_ , torch_dtype=torch.float16 , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ = 'a cat sitting on a park bench'
snake_case__ = torch.manual_seed(0 )
snake_case__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , output_type='np' , )
snake_case__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
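# Quick standalone check of the output-size arithmetic asserted in the tests
# above: the x4 upscaler maps an H x W input to a (1, 4*H, 4*W, 3) array in
# 'np' output mode, so the 512x512 results imply a 128x128 low-res input
# (an assumption consistent with, but not stated by, the tests).
low_res_side = 128
assert (1, low_res_side * 4, low_res_side * 4, 3) == (1, 512, 512, 3)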
| 368 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
lowerCAmelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
FAIRSEQ_LANGUAGE_CODES = lowerCAmelCase  # alias for the language-code list defined above
class NllbTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , a__ , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__=None , a__=None , a__=None , a__ = None , a__=None , a__=False , **a__ , ):
_UpperCAmelCase = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_UpperCAmelCase = legacy_behaviour
super().__init__(
bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , tokenizer_file=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowercase__ , **lowercase__ , )
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase__ ) )
_UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase = 1
_UpperCAmelCase = len(self.sp_model )
_UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase__ )
}
_UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
_UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_UpperCAmelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_UpperCAmelCase = src_lang if src_lang is not None else "eng_Latn"
_UpperCAmelCase = self.lang_code_to_id[self._src_lang]
_UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self ):
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang( self ):
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ):
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        """Converts a token (str) to an id using the vocab, honoring the fairseq offset."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
def __A ( self , a__ , a__ = "eng_Latn" , a__ = None , a__ = "fra_Latn" , **a__ , ):
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def __A ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __A ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        """Reset the special tokens to the source language setting."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , lang ):
        """Reset the special tokens to the target language setting."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
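# A minimal standalone sketch of the fairseq/SentencePiece id alignment used by
# _convert_token_to_id above: the first four ids are pinned to the special
# tokens and every SentencePiece id is shifted by a fixed offset of 1. The
# helper below is illustrative, not part of the tokenizer.
_FAIRSEQ_SPECIALS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_FAIRSEQ_OFFSET = 1

def _spm_id_to_fairseq_id(spm_id: int) -> int:
    # the sp model returns 0 for unknown pieces; map that to the <unk> id
    return spm_id + _FAIRSEQ_OFFSET if spm_id else _FAIRSEQ_SPECIALS["<unk>"]

assert _spm_id_to_fairseq_id(0) == 3  # unknown piece
assert _spm_id_to_fairseq_id(5) == 6  # ordinary piece, shifted by the offset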
| 702 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool ( PipelineTool ):
    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation
    inputs = ['image', 'text']
    outputs = ['image']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['vision'] )
        super().__init__(*args , **kwargs )
    def encode( self , image , label ):
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='pt' )
    def forward( self , inputs ):
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self , outputs ):
        array = outputs.cpu().detach().numpy()
        # binarize the logits: non-positive scores become background (0), positive scores become mask (1)
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
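# Standalone check of the binarization in `decode` above, with hypothetical
# logit values: non-positive scores map to 0 and positive scores to 1.
_demo = np.array([[-1.2, 0.3], [2.0, -0.5]])
_demo[_demo <= 0] = 0
_demo[_demo > 0] = 1
assert _demo.tolist() == [[0.0, 1.0], [1.0, 0.0]]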
| 494 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 648 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig (PretrainedConfig ):
    """Configuration class to store the configuration of a BLOOM model."""
    model_type = 'bloom'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_hidden_layers': 'n_layer',
        'num_attention_heads': 'n_head',
    }
    def __init__( self , vocab_size=250880 , hidden_size=64 , n_layer=2 , n_head=8 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class BloomOnnxConfig (OnnxConfigWithPast ):
    """ONNX export configuration for BLOOM."""
    torch_onnx_minimum_version = version.parse('1.12' )
    def __init__( self , config , task = 'default' , patching_specs = None , use_past = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , 'pad_token_id' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ):
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction='inputs' , inverted_values_shape=True )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_layers( self ):
        return self._config.n_layer
    @property
    def num_attention_heads( self ):
        return self._config.n_head
    @property
    def atol_for_validation( self ):
        return 1e-3
def lowercase ( self : List[Any] , __a : "PreTrainedTokenizer" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , ):
snake_case__ : List[str] = super(__a , self ).generate_dummy_inputs(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
# We need to order the input in the way they appears in the forward()
snake_case__ : List[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case__ , snake_case__ : int = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
snake_case__ : int = seqlen + 2
snake_case__ : Any = self._config.hidden_size // self.num_attention_heads
snake_case__ : int = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
snake_case__ : int = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
snake_case__ : Union[str, Any] = [
(torch.zeros(__a ), torch.zeros(__a )) for _ in range(self.num_layers )
]
snake_case__ : Optional[Any] = common_inputs["""attention_mask"""]
if self.use_past:
snake_case__ : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
snake_case__ : List[Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__a , __a , dtype=__a )] , dim=1 )
return ordered_inputs
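    # Worked example of the dummy past_key_values shapes built above, with
    # illustrative values batch=2, n_head=8, hidden_size=64, seqlen=3:
    #   head_dim = 64 // 8 = 8 and past_len = 3 + 2 = 5, so each layer gets
    #   a key of shape   (batch * n_head, head_dim, past_len) = (16, 8, 5)
    #   and a value of shape (batch * n_head, past_len, head_dim) = (16, 5, 8).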
    @property
    def default_onnx_opset( self ):
        return 13
 | 648 | 0 |
| 648 | 1 |
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCAmelCase__ = "Create a default config file for Accelerate with only a few flags set."
def lowerCamelCase_ ( UpperCAmelCase_ : int="no" , UpperCAmelCase_ : Dict = default_json_config_file , UpperCAmelCase_ : Optional[Any] = False ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : int = Path(__lowerCAmelCase )
path.parent.mkdir(parents=__lowerCAmelCase , exist_ok=__lowerCAmelCase )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
_UpperCamelCase : Dict = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
_UpperCamelCase : Union[str, Any] = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
_UpperCamelCase : str = torch.cuda.device_count()
_UpperCamelCase : str = num_gpus
_UpperCamelCase : str = False
if num_gpus > 1:
_UpperCamelCase : str = """MULTI_GPU"""
else:
_UpperCamelCase : int = """NO"""
elif is_xpu_available() and use_xpu:
_UpperCamelCase : Dict = torch.xpu.device_count()
_UpperCamelCase : Optional[int] = num_xpus
_UpperCamelCase : Optional[int] = False
if num_xpus > 1:
_UpperCamelCase : List[Any] = """MULTI_XPU"""
else:
_UpperCamelCase : Dict = """NO"""
elif is_npu_available():
_UpperCamelCase : Union[str, Any] = torch.npu.device_count()
_UpperCamelCase : Any = num_npus
_UpperCamelCase : Optional[int] = False
if num_npus > 1:
_UpperCamelCase : Union[str, Any] = """MULTI_NPU"""
else:
_UpperCamelCase : int = """NO"""
else:
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : int = True
_UpperCamelCase : Any = 1
_UpperCamelCase : Optional[Any] = """NO"""
_UpperCamelCase : Optional[int] = ClusterConfig(**__lowerCAmelCase )
config.to_json_file(__lowerCAmelCase )
return path
def default_command_parser( parser , parents ):
    """Registers the `accelerate config default` subcommand on the given parser."""
    parser = parser.add_parser('default' , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '--config_file' , default=default_json_config_file , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , dest='save_location' , )
    parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=str , help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command( args ):
    """Entry point for `accelerate config default`."""
    config_file = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(f'accelerate configuration saved at {config_file}' )
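# A minimal standalone sketch of the mixed-precision validation performed by
# write_basic_config above; `_validate_mixed_precision` is a hypothetical
# helper for illustration, not part of accelerate.
def _validate_mixed_precision(mixed_precision: str) -> str:
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ("no", "fp16", "bf16", "fp8"):
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" )
    return mixed_precision

assert _validate_mixed_precision("FP16") == "fp16"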
| 712 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig( PretrainedConfig ):
    """Configuration class to store the configuration of a ViT MAE model."""
    model_type = 'vit_mae'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act='gelu' , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2048 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
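# Worked example for the defaults above: a 224x224 image with 16x16 patches
# yields (224 // 16) ** 2 = 196 patches, and mask_ratio=0.75 masks 147 of them.
_num_patches = (224 // 16) ** 2
assert _num_patches == 196
assert int(_num_patches * 0.75) == 147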
| 648 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=0.2 , snake_case_=0.2 ) -> List[str]:
'''simple docstring'''
__lowercase = bp_numa
__lowercase = bp_numa
__lowercase = bp_numa
__lowercase = conva_get[:2]
__lowercase = conva_get[2]
__lowercase = size_pa
__lowercase = rate_w
__lowercase = rate_t
__lowercase = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
__lowercase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__lowercase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__lowercase = -2 * np.random.rand(self.conva[1] ) + 1
__lowercase = -2 * np.random.rand(self.num_bpa ) + 1
__lowercase = -2 * np.random.rand(self.num_bpa ) + 1
def A ( self , snake_case_ ) -> Dict:
'''simple docstring'''
__lowercase = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(snake_case_ , '''wb''' ) as f:
pickle.dump(snake_case_ , snake_case_ )
print(F'Model saved: {save_path}' )
@classmethod
def A ( cls , snake_case_ ) -> List[Any]:
'''simple docstring'''
with open(snake_case_ , '''rb''' ) as f:
__lowercase = pickle.load(snake_case_ ) # noqa: S301
__lowercase = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
__lowercase = model_dic.get('''size_pooling1''' )
__lowercase = model_dic.get('''num_bp1''' )
__lowercase = model_dic.get('''num_bp2''' )
__lowercase = model_dic.get('''num_bp3''' )
__lowercase = model_dic.get('''rate_weight''' )
__lowercase = model_dic.get('''rate_thre''' )
# create model instance
__lowercase = CNN(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# modify model parameter
__lowercase = model_dic.get('''w_conv1''' )
__lowercase = model_dic.get('''wkj''' )
__lowercase = model_dic.get('''vji''' )
__lowercase = model_dic.get('''thre_conv1''' )
__lowercase = model_dic.get('''thre_bp2''' )
__lowercase = model_dic.get('''thre_bp3''' )
return conv_ins
def A ( self , snake_case_ ) -> Tuple:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def A ( self , snake_case_ ) -> int:
'''simple docstring'''
return round(snake_case_ , 3 )
def A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
'''simple docstring'''
__lowercase = convs[0]
__lowercase = convs[1]
__lowercase = np.shape(snake_case_ )[0]
# get the data slice of original image data, data_focus
__lowercase = []
for i_focus in range(0 , size_data - size_conv + 1 , snake_case_ ):
for j_focus in range(0 , size_data - size_conv + 1 , snake_case_ ):
__lowercase = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(snake_case_ )
# calculate the feature map of every single kernel, and saved as list of matrix
__lowercase = []
__lowercase = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(snake_case_ ):
__lowercase = []
for i_focus in range(len(snake_case_ ) ):
__lowercase = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(snake_case_ ) )
__lowercase = np.asmatrix(snake_case_ ).reshape(
snake_case_ , snake_case_ )
data_featuremap.append(snake_case_ )
# expanding the data slice to One dimenssion
__lowercase = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(snake_case_ ) )
__lowercase = np.asarray(snake_case_ )
return focus_list, data_featuremap
def A ( self , snake_case_ , snake_case_ , snake_case_="average_pool" ) -> Dict:
'''simple docstring'''
__lowercase = len(featuremaps[0] )
__lowercase = int(size_map / size_pooling )
__lowercase = []
for i_map in range(len(snake_case_ ) ):
__lowercase = featuremaps[i_map]
__lowercase = []
for i_focus in range(0 , snake_case_ , snake_case_ ):
for j_focus in range(0 , snake_case_ , snake_case_ ):
__lowercase = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(snake_case_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(snake_case_ ) )
__lowercase = np.asmatrix(snake_case_ ).reshape(snake_case_ , snake_case_ )
featuremap_pooled.append(snake_case_ )
return featuremap_pooled
def A ( self , snake_case_ ) -> Any:
'''simple docstring'''
__lowercase = []
for i in range(len(snake_case_ ) ):
__lowercase = np.shape(data[i] )
__lowercase = data[i].reshape(1 , shapes[0] * shapes[1] )
__lowercase = data_listed.getA().tolist()[0]
data_expanded.extend(snake_case_ )
__lowercase = np.asarray(snake_case_ )
return data_expanded
def A ( self , snake_case_ ) -> str:
'''simple docstring'''
__lowercase = np.asarray(snake_case_ )
__lowercase = np.shape(snake_case_ )
__lowercase = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = []
__lowercase = 0
for i_map in range(snake_case_ ):
__lowercase = np.ones((size_map, size_map) )
for i in range(0 , snake_case_ , snake_case_ ):
for j in range(0 , snake_case_ , snake_case_ ):
__lowercase = pd_pool[
i_pool
]
__lowercase = i_pool + 1
__lowercase = np.multiply(
snake_case_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(snake_case_ )
return pd_all
def A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=bool ) -> List[Any]:
'''simple docstring'''
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(snake_case_ )) )
print((''' - - Shape: Teach_Data ''', np.shape(snake_case_ )) )
__lowercase = 0
__lowercase = []
__lowercase = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
__lowercase = 0
print(F'-------------Learning Time {rp}--------------' )
for p in range(len(snake_case_ ) ):
# print('------------Learning Image: %d--------------'%p)
__lowercase = np.asmatrix(datas_train[p] )
__lowercase = np.asarray(datas_teach[p] )
__lowercase , __lowercase = self.convolute(
snake_case_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowercase = self.pooling(snake_case_ , self.size_poolinga )
__lowercase = np.shape(snake_case_ )
__lowercase = self._expand(snake_case_ )
__lowercase = data_bp_input
__lowercase = np.dot(snake_case_ , self.vji.T ) - self.thre_bpa
__lowercase = self.sig(snake_case_ )
__lowercase = np.dot(snake_case_ , self.wkj.T ) - self.thre_bpa
__lowercase = self.sig(snake_case_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
__lowercase = np.multiply(
(data_teach - bp_outa) , np.multiply(snake_case_ , (1 - bp_outa) ) )
__lowercase = np.multiply(
np.dot(snake_case_ , self.wkj ) , np.multiply(snake_case_ , (1 - bp_outa) ) )
__lowercase = np.dot(snake_case_ , self.vji )
__lowercase = pd_i_all / (self.size_poolinga * self.size_poolinga)
__lowercase = pd_conva_pooled.T.getA().tolist()
__lowercase = self._calculate_gradient_from_pool(
snake_case_ , snake_case_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
__lowercase = self._expand_mat(pd_conva_all[k_conv] )
__lowercase = self.rate_weight * np.dot(snake_case_ , snake_case_ )
__lowercase = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
__lowercase = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
__lowercase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
__lowercase = self.vji + pd_j_all.T * bp_outa * self.rate_weight
__lowercase = self.thre_bpa - pd_k_all * self.rate_thre
__lowercase = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
__lowercase = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
__lowercase = rp + 1
__lowercase = error_count / patterns
all_mse.append(snake_case_ )
def draw_error():
__lowercase = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(snake_case_ , '''+-''' )
plt.plot(snake_case_ , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(snake_case_ , alpha=0.5 )
plt.show()
print('''------------------Training Complished---------------------''' )
print((''' - - Training epoch: ''', rp, F' - - Mse: {mse:.6f}') )
if draw_e:
draw_error()
return mse
def A ( self , snake_case_ ) -> str:
'''simple docstring'''
__lowercase = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(snake_case_ )) )
for p in range(len(snake_case_ ) ):
__lowercase = np.asmatrix(datas_test[p] )
__lowercase , __lowercase = self.convolute(
snake_case_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowercase = self.pooling(snake_case_ , self.size_poolinga )
__lowercase = self._expand(snake_case_ )
__lowercase = data_bp_input
__lowercase = bp_outa * self.vji.T - self.thre_bpa
__lowercase = self.sig(snake_case_ )
__lowercase = bp_outa * self.wkj.T - self.thre_bpa
__lowercase = self.sig(snake_case_ )
produce_out.extend(bp_outa.getA().tolist() )
__lowercase = [list(map(self.do_round , snake_case_ ) ) for each in produce_out]
return np.asarray(snake_case_ )
def A ( self , snake_case_ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = np.asmatrix(snake_case_ )
__lowercase , __lowercase = self.convolute(
snake_case_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowercase = self.pooling(snake_case_ , self.size_poolinga )
return data_conveda, data_pooleda
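# A standalone sketch of the average pooling performed by the pooling method
# above (numpy is already imported at the top of this file): a 4x4 feature map
# with size_pooling=2 reduces to the 2x2 means of its non-overlapping blocks.
_fmap = np.arange(16, dtype=float).reshape(4, 4)
_pooled = _fmap.reshape(2, 2, 2, 2).mean(axis=(1, 3))
assert _pooled.tolist() == [[2.5, 4.5], [10.5, 12.5]]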
if __name__ == "__main__":
pass
| 639 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader ( AbstractDatasetReader ):
    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ):
        """Reader that builds a Dataset (or IterableDataset) from plain text files."""
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
    def read( self ):
        """Returns the dataset, either streamed lazily or fully materialized."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
| 639 | 1 |
from math import pi
def arc_length(angle, radius):
    """Calculate the length of a circular arc from its central angle (degrees) and radius."""
    return 2 * pi * radius * (angle / 360)
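# Worked example: a 90-degree arc of a circle with radius 10 has length
# 2 * pi * 10 * (90 / 360) = 5 * pi, roughly 15.708.
assert abs(arc_length(90, 10) - 5 * pi) < 1e-12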
if __name__ == "__main__":
    print(arc_length(90, 10))
 | 702 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig( PretrainedConfig ):
    """Configuration class to store the configuration of a UniSpeechSat model."""
    model_type = 'unispeech-sat'
def __init__( self : int , __snake_case : Optional[int]=32 , __snake_case : Dict=768 , __snake_case : Optional[Any]=12 , __snake_case : Optional[int]=12 , __snake_case : Dict=3_072 , __snake_case : List[str]="gelu" , __snake_case : Any=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=0.02 , __snake_case : Optional[Any]=1E-5 , __snake_case : Optional[int]="group" , __snake_case : str="gelu" , __snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __snake_case : str=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Tuple=(10, 3, 3, 3, 3, 2, 2) , __snake_case : int=False , __snake_case : Optional[int]=128 , __snake_case : Any=16 , __snake_case : Union[str, Any]=False , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=0.05 , __snake_case : Dict=10 , __snake_case : int=2 , __snake_case : Optional[Any]=0.0 , __snake_case : Optional[int]=10 , __snake_case : List[Any]=0 , __snake_case : Optional[int]=320 , __snake_case : int=2 , __snake_case : Any=0.1 , __snake_case : Optional[int]=100 , __snake_case : Tuple=256 , __snake_case : List[str]=256 , __snake_case : List[Any]=0.1 , __snake_case : Tuple="mean" , __snake_case : List[Any]=False , __snake_case : List[str]=False , __snake_case : Optional[Any]=256 , __snake_case : Tuple=(512, 512, 512, 512, 1_500) , __snake_case : Optional[int]=(5, 3, 3, 1, 1) , __snake_case : Any=(1, 2, 3, 1, 1) , __snake_case : int=512 , __snake_case : Optional[int]=0 , __snake_case : Dict=1 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=504 , **__snake_case : List[str] , ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
@property
    def inputs_to_logits_ratio(self):
        # functools and operator are imported at the top of the original module
        return functools.reduce(operator.mul, self.conv_stride, 1) | 641 | 0 |
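# A quick sanity-check sketch of the two derived quantities above (the values
# mirror the defaults; this block is illustrative and not part of the original
# snippet): the three conv lists must share a length, and the product of the
# strides gives the overall input-to-logits downsampling ratio.
import functools
import operator

conv_dim = (512, 512, 512, 512, 512, 512, 512)
conv_stride = (5, 2, 2, 2, 2, 2, 2)
conv_kernel = (10, 3, 3, 3, 3, 2, 2)
assert len(conv_dim) == len(conv_stride) == len(conv_kernel)
assert functools.reduce(operator.mul, conv_stride, 1) == 320  # 5 * 2**6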
"""simple docstring"""
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
SCREAMING_SNAKE_CASE_ = set()
return any(
node not in visited and depth_first_search(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
for node in graph )
def _lowerCamelCase ( __a, __a, __a, __a ):
visited.add(__SCREAMING_SNAKE_CASE )
rec_stk.add(__SCREAMING_SNAKE_CASE )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(__SCREAMING_SNAKE_CASE )
return False
if __name__ == "__main__":
from doctest import testmod
testmod() | 626 |
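# Usage sketch (the graphs below are hypothetical examples, not from the
# original snippet): adjacency lists map each vertex to its out-neighbours; a
# neighbour found while still on the recursion stack is a back edge, i.e. a cycle.
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True     # 2 -> 0 closes a cycle
assert check_cycle({0: [1, 2], 1: [2], 2: []}) is False  # a DAG has no back edge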
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 84 | 0 |
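# Illustrative doctest (hypothetical, not from the original file) showing the
# IGNORE_RESULT flag registered above: appending `# doctest: +IGNORE_RESULT`
# makes the checker accept any output for that line, which is handy for
# non-deterministic values.
#
#     >>> import random
#     >>> random.random()  # doctest: +IGNORE_RESULT
#     0.123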
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
def snake_case ( self : Any ):
lowerCamelCase :str = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase :Any = None
if self.use_labels:
lowerCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase :Any = self.get_config()
return config, pixel_values, labels
def snake_case ( self : List[Any] ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def snake_case ( self : List[Any] , __snake_case : int , __snake_case : List[Any] , __snake_case : List[Any] ):
lowerCamelCase :str = VideoMAEModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Optional[int] = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Tuple , __snake_case : List[Any] , __snake_case : Any , __snake_case : List[Any] ):
lowerCamelCase :Any = VideoMAEForPreTraining(__snake_case )
model.to(__snake_case )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase :int = torch.ones((self.num_masks,) )
lowerCamelCase :Dict = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCamelCase :Optional[Any] = mask.expand(self.batch_size , -1 ).bool()
lowerCamelCase :int = model(__snake_case , __snake_case )
# model only returns predictions for masked patches
lowerCamelCase :Optional[Any] = mask.sum().item()
lowerCamelCase :Any = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def snake_case ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = VideoMAEModelTester(self )
lowerCamelCase :Tuple = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case ( self : Tuple , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : int=False ):
lowerCamelCase :Optional[Any] = copy.deepcopy(__snake_case )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase :Optional[int] = torch.ones((self.model_tester.num_masks,) )
lowerCamelCase :Tuple = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCamelCase :Optional[int] = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowerCamelCase :Tuple = bool_masked_pos.to(__snake_case )
if return_labels:
if model_class in [
*get_values(__snake_case ),
]:
lowerCamelCase :str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def snake_case ( self : Dict ):
pass
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :List[str] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
lowerCamelCase :Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Any = [*signature.parameters.keys()]
lowerCamelCase :Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : Tuple ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Dict = VideoMAEModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def snake_case ( self : str ):
if not self.has_attentions:
pass
else:
lowerCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :Any = True
for model_class in self.all_model_classes:
lowerCamelCase :Any = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase :List[str] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCamelCase :List[str] = True
lowerCamelCase :Any = False
lowerCamelCase :Optional[Any] = True
lowerCamelCase :Any = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :List[Any] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Tuple = True
lowerCamelCase :Dict = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Optional[int] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :List[Any] = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase :Any = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase :List[str] = True
lowerCamelCase :Optional[Any] = True
lowerCamelCase :Optional[int] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
self.assertEqual(out_len + 1 , len(__snake_case ) )
lowerCamelCase :Any = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[Any] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int] ):
lowerCamelCase :List[str] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :List[Any] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :List[Any] = outputs.hidden_states
lowerCamelCase :Optional[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__snake_case ) , __snake_case )
lowerCamelCase :Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase :Optional[Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Tuple = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Dict = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case ( self : Optional[Any] ):
pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def snake_case ( self : str ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def snake_case ( self : Optional[Any] ):
lowerCamelCase :List[str] = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :int = prepare_video()
lowerCamelCase :Dict = image_processor(__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :List[str] = model(**__snake_case )
# verify the logits
lowerCamelCase :Dict = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :List[str] = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) )
@slow
def snake_case ( self : Dict ):
lowerCamelCase :str = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(__snake_case )
lowerCamelCase :str = self.default_image_processor
lowerCamelCase :Any = prepare_video()
lowerCamelCase :List[Any] = image_processor(__snake_case , return_tensors='''pt''' ).to(__snake_case )
# add boolean mask, indicating which patches to mask
lowerCamelCase :int = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
lowerCamelCase :Optional[Any] = torch.load(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Union[str, Any] = model(**__snake_case )
# verify the logits
lowerCamelCase :List[Any] = torch.Size([1, 1408, 1536] )
lowerCamelCase :List[str] = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=__snake_case )
self.assertEqual(outputs.logits.shape , __snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowerCamelCase :str = torch.tensor([0.5_1_4_2] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.loss , __snake_case , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCamelCase :List[Any] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=__snake_case ).to(
__snake_case )
with torch.no_grad():
lowerCamelCase :Optional[int] = model(**__snake_case )
lowerCamelCase :Any = torch.tensor(torch.tensor([0.6_4_6_9] ) , device=__snake_case )
self.assertTrue(torch.allclose(outputs.loss , __snake_case , atol=1e-4 ) )
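# Standalone sketch (shapes are illustrative) of the boolean-mask construction
# repeated in the tests above: a single mask of `num_masks` ones is padded with
# zeros to the full token length and broadcast across the batch, so every video
# masks the same number of patches.
import torch

seq_length, num_masks, batch_size = 10, 6, 2
mask = torch.ones((num_masks,))
mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
bool_masked_pos = mask.expand(batch_size, -1).bool()
assert bool_masked_pos.shape == (batch_size, seq_length)
assert int(bool_masked_pos.sum()) == batch_size * num_masks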
| 705 | import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Smallest spiral side length whose ratio of primes along the diagonals drops below `ratio`."""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 0 |
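# Worked example (illustrative): for side length j = 3 the inner loop is
# range(13, 25, 4), which visits the three non-square corners 13, 17, 21 of the
# next ring (the fourth corner, 25, is the odd square and never prime). The
# starting state primes = 3 counts the corners 3, 5, 7 of the first ring, out
# of 2*j - 1 = 5 diagonal entries, giving an initial ratio of 3 / 5 = 0.6.
assert is_prime(13) and is_prime(17) and not is_prime(21)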
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 652 |
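# A slightly fuller variant (assumes only that torch is installed; runs fine
# with zero GPUs): print each visible CUDA device's index and name next to the
# count reported above.
import torch

def list_devices() -> None:
    for i in range(torch.cuda.device_count()):
        print(i, torch.cuda.get_device_name(i))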
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    """LIFO stack backed by a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 80 | 0 |
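# Usage sketch for the linked-list stack above (values are illustrative only):
# newer items sit at the head node, so iteration and __str__ run top-down.
stack = Stack[int]()
stack.push(1)
stack.push(2)
assert str(stack) == "2->1"
assert stack.peek() == 2
assert stack.pop() == 2
assert len(stack) == 1 and not stack.is_empty()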
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__UpperCAmelCase = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
__UpperCAmelCase = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
__UpperCAmelCase = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute(self, references, predictions):
        n_correct = 0.0
        for i, j in zip(references, predictions):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(references)
return {
"accuracy": accuracy,
} | 98 |
'''simple docstring'''
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1),
        )
        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait() | 98 | 1 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in qs match any contiguous window of strings in the tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # windows of len(qts) in ks, beginning at i
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result)) | 113 |
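# Illustrative check (the parameter path below is hypothetical): a rule tuple
# matches when its regexes line up with any contiguous window of the flattened
# parameter key, so an MLP kernel deep in the tree still picks up its
# PartitionSpec while unrelated keys fall through to the default.
assert _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
assert not _match(("mlp", "c_fc", "kernel"), ("transformer", "ln_f", "scale"))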
"""simple docstring"""
from jiwer import compute_measures
import datasets
__lowerCAmelCase : Tuple = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__lowerCAmelCase : Union[str, Any] = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
__lowerCAmelCase : Optional[int] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 58 | 0 |
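# Worked arithmetic (one possible word alignment) for the docstring example
# above: pair 1 ("this is the prediction" vs "this is the reference") has
# S=1, D=0, I=0 and 3 hits, so its total is S + D + C = 4; pair 2 aligns with
# S=2, I=1, D=0 and 2 hits, so its total is also 4. Pooled:
#     WER = (1 + 3) / (4 + 4) = 0.5
# which matches the printed result.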
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=True , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_0 , lowercase=0.02 , lowercase=True , lowercase=None , ):
"""simple docstring"""
A_ : Tuple = parent
A_ : List[str] = batch_size
A_ : Any = seq_length
A_ : List[Any] = is_training
A_ : Any = use_input_mask
A_ : Dict = vocab_size
A_ : Optional[Any] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : Optional[int] = intermediate_size
A_ : Any = hidden_act
A_ : List[Any] = hidden_dropout_prob
A_ : Optional[Any] = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : str = initializer_range
A_ : List[Any] = use_labels
A_ : List[Any] = scope
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Optional[int] = None
if self.use_input_mask:
A_ : str = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
"""simple docstring"""
A_ : Optional[int] = BertGenerationEncoder(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
A_ : List[str] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
"""simple docstring"""
A_ : Optional[int] = True
A_ : Tuple = BertGenerationEncoder(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Any = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , )
A_ : str = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
"""simple docstring"""
A_ : List[Any] = True
A_ : Dict = True
A_ : Dict = BertGenerationDecoder(config=_lowerCamelCase ).to(_lowerCamelCase ).eval()
# first forward pass
A_ : Optional[int] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase , )
A_ : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A_ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A_ : str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
A_ : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
A_ : Any = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , output_hidden_states=_lowerCamelCase , )['hidden_states'][0]
A_ : int = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , output_hidden_states=_lowerCamelCase , )['hidden_states'][0]
# select random slice
A_ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , *lowercase , ):
"""simple docstring"""
A_ : Optional[Any] = BertGenerationDecoder(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = BertGenerationEncoderTester(self )
A_ : Union[str, Any] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=3_7 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_lowerCamelCase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*_lowerCamelCase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*_lowerCamelCase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(_lowerCamelCase )
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
A_ : Optional[Any] = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
A_ : int = model(_lowerCamelCase )[0]
A_ : Any = torch.Size([1, 8, 1_0_2_4] )
self.assertEqual(output.shape , _lowerCamelCase )
A_ : int = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
A_ : Optional[int] = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
A_ : str = model(_lowerCamelCase )[0]
A_ : Tuple = torch.Size([1, 8, 5_0_3_5_8] )
self.assertEqual(output.shape , _lowerCamelCase )
A_ : Dict = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
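# Minimal sketch (assumptions: a tiny randomly initialised config, CPU) of the
# caching equivalence the decoder test above asserts: logits from an
# incremental forward that reuses past_key_values must match a single full
# forward over the whole sequence.
import torch
from transformers import BertGenerationConfig, BertGenerationDecoder

tiny_config = BertGenerationConfig(
    vocab_size=50, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37, is_decoder=True,
)
tiny_model = BertGenerationDecoder(tiny_config).eval()
ids = torch.randint(0, 50, (1, 7))
with torch.no_grad():
    full = tiny_model(ids).logits
    first = tiny_model(ids[:, :4], use_cache=True)
    rest = tiny_model(ids[:, 4:], past_key_values=first.past_key_values).logits
assert torch.allclose(full[:, 4:], rest, atol=1e-5)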
| 712 | from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
def __init__( self , lowercase , lowercase=3 , lowercase=3_2 , lowercase=3 , lowercase=1_0 , lowercase=[1_0, 2_0, 3_0, 4_0] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ):
"""simple docstring"""
A_ : List[Any] = parent
A_ : Optional[Any] = batch_size
A_ : Dict = image_size
A_ : str = num_channels
A_ : Union[str, Any] = embeddings_size
A_ : Optional[Any] = hidden_sizes
A_ : Any = depths
A_ : List[str] = is_training
A_ : int = use_labels
A_ : Optional[Any] = hidden_act
A_ : List[Any] = num_labels
A_ : Optional[int] = scope
A_ : int = len(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
A_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Any = TFRegNetModel(config=lowercase )
A_ : Optional[Any] = model(lowercase , training=lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.num_labels
A_ : Tuple = TFRegNetForImageClassification(lowercase )
A_ : List[str] = model(lowercase , labels=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = TFRegNetModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Optional[Any] = [*signature.parameters.keys()]
A_ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : List[Any] = model_class(lowercase )
A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : int = layer_type
A_ : Tuple = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Any = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ):
A_ : Tuple = model(lowercase , return_dict=lowercase , **lowercase )
A_ : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase , lowercase ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowercase , lowercase ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
A_ : Dict = model_class(lowercase )
A_ : Optional[int] = self._prepare_for_class(lowercase , lowercase )
A_ : Union[str, Any] = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : Any = self._prepare_for_class(lowercase , lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
A_ : Tuple = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = TFRegNetModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
@cached_property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : int = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : Any = image_processor(images=lowercase , return_tensors='tf' )
# forward pass
A_ : Tuple = model(**lowercase , training=lowercase )
# verify the logits
A_ : int = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : Tuple = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
| 70 | 0 |
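# Standalone sketch of the tuple-vs-dict output equivalence idea exercised
# above (the tensors and nesting are illustrative): walk both structures in
# parallel and require elementwise equality at every tensor leaf, with None
# leaves only allowed to pair with None.
import tensorflow as tf

def nested_equal(a, b) -> bool:
    if isinstance(a, (list, tuple)):
        return all(nested_equal(x, y) for x, y in zip(a, b))
    if a is None:
        return b is None
    return bool(tf.reduce_all(tf.equal(a, b)))

t = tf.constant([1.0, 2.0])
assert nested_equal((t, (t, None)), (t, (t, None)))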
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
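# Minimal standalone sketch of the lazy-import pattern above (the module and
# attribute names are hypothetical): swapping the entry in sys.modules for a
# proxy module defers the heavy submodule imports until an attribute is first
# touched.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_submodule):
        super().__init__(name)
        self._attr_to_submodule = attr_to_submodule

    def __getattr__(self, attr):
        if attr in self._attr_to_submodule:
            module = importlib.import_module(self._attr_to_submodule[attr])
            return getattr(module, attr)
        raise AttributeError(attr)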
| 80 | '''simple docstring'''
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
| 370 | 0 |
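# Worked check (illustrative): with h(n) = n * (2n - 1), the values for
# n = 0..4 are 0, 1, 6, 15, 28 -- exactly what hexagonal_numbers(length=5)
# prints above.
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]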
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = """The Nymphenburg Palace is a beautiful palace in Munich!"""
def _lowerCamelCase ( UpperCAmelCase_ : str, UpperCAmelCase_ : str ) -> int:
"""simple docstring"""
A__ = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
A__ = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
A__ = BERTEncoder(
attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"], units=predefined_args["units"], hidden_size=predefined_args["hidden_size"], max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"], scaled=predefined_args["scaled"], dropout=predefined_args["dropout"], output_attention=UpperCAmelCase_, output_all_encodings=UpperCAmelCase_, use_residual=predefined_args["use_residual"], activation=predefined_args.get("activation", "gelu" ), layer_norm_eps=predefined_args.get("layer_norm_eps", UpperCAmelCase_ ), )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
A__ = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
A__ = os.path.join(get_home_dir(), "models" )
A__ = _load_vocab(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, cls=UpperCAmelCase_ )
A__ = nlp.model.BERTModel(
UpperCAmelCase_, len(UpperCAmelCase_ ), units=predefined_args["units"], embed_size=predefined_args["embed_size"], embed_dropout=predefined_args["embed_dropout"], word_embed=predefined_args["word_embed"], use_pooler=UpperCAmelCase_, use_token_type_embed=UpperCAmelCase_, token_type_vocab_size=predefined_args["token_type_vocab_size"], use_classifier=UpperCAmelCase_, use_decoder=UpperCAmelCase_, )
original_bort.load_parameters(UpperCAmelCase_, cast_dtype=UpperCAmelCase_, ignore_extra=UpperCAmelCase_ )
A__ = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(UpperCAmelCase_ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight" )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight" )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta" )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias" )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight" )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta" )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma" )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
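    # Note (hypothetical aside): `half()` casts the floating-point parameters to fp16 in place,
    # roughly halving the size of the saved checkpoint; the numerical comparison below therefore
    # uses a fairly loose tolerance (atol=1e-3).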
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ The models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 562 |
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
"""7B""": 1_1008,
"""13B""": 1_3824,
"""30B""": 1_7920,
"""65B""": 2_2016,
"""70B""": 2_8672,
}
NUM_SHARDS = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """Round 8n/3 (scaled by `ffn_dim_multiplier`) up to the nearest multiple of `multiple_of`."""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
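# Sanity check of the rounding rule against the sizes hard-coded above (hypothetical aside):
# int(8 * 4096 / 3) = 10922, rounded up to the nearest multiple of 256 gives 11008, so
#   compute_intermediate_size(4096) == 11008   # matches INTERMEDIATE_SIZE_MAP["7B"]  (dim=4096)
#   compute_intermediate_size(5120) == 13824   # matches INTERMEDIATE_SIZE_MAP["13B"] (dim=5120)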
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
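    # Illustration (hypothetical, not executed): with n_heads=1 and a 4-row weight whose rotary
    # pairs are stored interleaved as rows [p0, p1, p2, p3], the view/transpose/reshape above
    # reorders them to [p0, p2, p1, p3], i.e. all first pair members then all second members,
    # which is the split-half layout the Transformers rotary embedding expects.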
print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
else:
# Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
            state_dict = {
F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wq.weight"""] ),
F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wk.weight"""] ),
F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""],
F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""],
F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""],
F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""],
F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""],
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""],
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.attention_norm.weight"""
].clone(),
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(dim, dim) )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim )
                        for i in range(num_shards)
                    ], dim=0, ).reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim, )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim )
                    for i in range(num_shards)
                ], dim=0, ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1 )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0 )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1 )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0 )
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
A__ = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
        state_dict = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
        state_dict = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]["tok_embeddings.weight"] for i in range(UpperCAmelCase_ )], dim=1 ),
"lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(UpperCAmelCase_ )], dim=0 ),
}
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))
    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params["n_heads"], num_hidden_layers=params["n_layers"], rms_norm_eps=params["norm_eps"], num_key_value_heads=num_key_value_heads, )
    config.save_pretrained(tmp_model_path)
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("Loading the checkpoint in a Llama model." )
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders", )
    parser.add_argument(
        "--model_size", choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"], )
    parser.add_argument(
        "--output_dir", help="Location to write HF model and tokenizer", )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
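# Typical invocation (hypothetical paths; the script name is assumed):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path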
| 562 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's tokens, or None if the snippet is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric boundaries."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
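# Why MinHash here (hypothetical aside): a MinHash signature approximates the exact Jaccard
# similarity that `jaccard_similarity` below computes, e.g.
#   tokens_a = get_tokens("def add(a, b): return a + b  # verbose sum of two numbers here")
#   tokens_b = get_tokens("def add(x, y): return x + y  # verbose sum of two numbers here")
#   get_min_hash(list(tokens_a)).jaccard(get_min_hash(list(tokens_b)))  # fast estimate
#   len(tokens_a & tokens_b) / len(tokens_a | tokens_b)                 # exact value
# which is what lets MinHashLSH retrieve near-duplicate candidates without comparing every pair.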
class DuplicationIndex:
    def __init__( self , *,
        duplication_jaccard_threshold: float = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add( self , code_key: Tuple , min_hash: MinHash ) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key , min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save( self , filepath ) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , "w" ) as f:
            json.dump(duplicate_clusters , f)
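# Minimal usage sketch (hypothetical):
#   di = DuplicationIndex(duplication_jaccard_threshold=0.85)
#   di.add((0, "org/repo_a", "a.py"), get_min_hash(tokens_a))
#   di.add((1, "org/repo_b", "b.py"), get_min_hash(tokens_b))
#   di.get_duplicate_clusters()  # -> [[{"base_index": 0, ...}, {"base_index": 1, ...}]] if similar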
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10_000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float) -> List[List[Dict]]:
    """Find duplicate clusters: MinHashes are computed in a multiprocessing pool, then added
    sequentially to a DuplicationIndex."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)) , max_queue_size=100 ) ):
        di.add(filename , min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the exact Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster so every member is similar (>= jaccard_threshold) to at least one kept
    "extreme"; each extreme counts the members it covers in `copies`."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1 , code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list) , ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f'''Original dataset size: {len(dataset)}''')
    print(f'''Number of duplicate clusters: {len(duplicate_clusters)}''')
    print(f'''Files in duplicate cluster: {len(duplicate_indices)}''')
    print(f'''Unique files in duplicate cluster: {len(extreme_dict)}''')
    print(f'''Filtered dataset size: {len(ds_filter)}''')
    return ds_filter, duplicate_clusters
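# End-to-end usage sketch (hypothetical; the dataset id is illustrative, any dataset exposing
# `content`, `repo_name` and `path` columns works):
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#   ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#   ds_dedup.save_to_disk("codeparrot-clean-deduplicated")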
| 684 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
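# `enable_full_determinism()` forces PyTorch onto deterministic kernel implementations (and
# disables cuDNN autotuning), so the pixel-slice assertions in the tests below are reproducible.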
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0) ).to(torch_device)
        return image
@property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=True , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='v_prediction')
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=350 , )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='v_prediction')
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=350 , )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        output = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
        image = output.images
        assert image.shape[0] == 2
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
        image = output.images
        assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='v_prediction')
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=350 , )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , num_inference_steps=2 , output_type='np' , ).images
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
            '/upsampled_cat.npy')
        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , image=image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
            '/upsampled_cat_fp16.npy')
        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , image=image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png')
        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt , image=image , generator=generator , num_inference_steps=5 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 368 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
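# Minimal usage sketch (hypothetical):
#   config = Data2VecVisionConfig(image_size=384)
#   onnx_config = Data2VecVisionOnnxConfig(config)
#   list(onnx_config.inputs)   # -> ["pixel_values"]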
| 708 | '''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, file_path: str):
        r"""Find all instances where a non-binary file is opened without UTF-8 encoding."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        r"""Find all instances of print statements."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
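    # Note (hypothetical aside): the negative lookahead in `_no_encoding_on_file_open` skips
    # binary/write modes ("rb", "wb", ...) and any call that already passes `encoding`, so only
    # text-mode `open(...)` calls without an explicit encoding are flagged.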
| 204 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = 'hf-internal-testing/tiny-random-bert'
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
FULL_COMMIT_HASH = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
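# Layout of the hub cache exercised below: <TRANSFORMERS_CACHE>/models--<org>--<name>/ contains
# `blobs/`, `refs/` and `snapshots/`; `refs/main` holds the commit hash that keys the
# `snapshots/<hash>/` folder in which resolved files live.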
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, 'refs', 'main')) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, 'snapshots', main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision='9b8c223')
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, 'snapshots', FULL_COMMIT_HASH, CONFIG_NAME))
    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, 'is not a valid model identifier'):
            _ = cached_file('tiny-random-bert', CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, 'is not a valid git identifier'):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision='aaaa')
        with self.assertRaisesRegex(EnvironmentError, 'does not appear to have a file named'):
            _ = cached_file(RANDOM_BERT, 'conf')
    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, 'does not appear to have a file named'):
            _ = cached_file(RANDOM_BERT, 'conf')
        with open(os.path.join(CACHE_DIR, 'refs', 'main')) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, '.no_exist', main_commit, 'conf')))
        path = cached_file(RANDOM_BERT, 'conf', _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        path = cached_file(RANDOM_BERT, 'conf', local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, 'conf', _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_has_file(self):
        self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only', WEIGHTS_NAME))
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only', TF2_WEIGHTS_NAME))
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only', FLAX_WEIGHTS_NAME))
    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo('bert-base-cased', 'ahah.txt'))
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, 'is not a valid model identifier'):
            get_file_from_repo('bert-base-case', CONFIG_NAME)
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, 'is not a valid git identifier'):
            get_file_from_repo('bert-base-cased', CONFIG_NAME, revision='ahaha')
        resolved_file = get_file_from_repo('bert-base-cased', CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, 'r').read())
        self.assertEqual(config['hidden_size'], 768)
    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / 'a.txt'
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, 'a.txt'), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, 'b.txt'))
| 596 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
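    # The tester above is consumed by FlaxModelTesterMixin below: the mixin's generic tests call
    # `prepare_config_and_inputs_for_common()` to obtain a (config, inputs_dict) pair for every
    # class listed in `all_model_classes`.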
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('albert-base-v2')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_albert_base(self):
        model = FlaxAlbertModel.from_pretrained('albert-base-v2')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 596 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed) ).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed) ).to(device)
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image) ).convert("RGB" ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device).startswith("mps" ):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(F'image.shape {image.shape}')
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        mask = np.ones((768, 768) , dtype=np.float32 )
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu" ).manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
    class SentencesToListOfCharacters(tr.AbstractTransform ):
        def __init__( self , sentence_delimiter=" " ):
            '''simple docstring'''
            self.sentence_delimiter = sentence_delimiter

        def process_string( self , s ):
            '''simple docstring'''
            return list(s )

        def process_list( self , inp ):
            '''simple docstring'''
            chars = []
            for sent_idx, sentence in enumerate(inp ):
                chars.extend(self.process_string(sentence ) )
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp ) - 1:
                    chars.append(self.sentence_delimiter )
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system, with a CER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric ):
    def _info( self ):
        '''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
    def _compute( self , predictions , references , concatenate_texts=False ):
        '''simple docstring'''
        if concatenate_texts:
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references ):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
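# A short worked example of the CER formula documented above (a sketch added for
# illustration, not part of the original metric): comparing reference "abcd" against
# prediction "abxd" gives S=1, D=0, I=0, C=3, so CER = (1 + 0 + 0) / (1 + 0 + 3) = 0.25.
# Note that jiwer reports the measure under the "wer" key; because the transforms above
# reduce sentences to lists of characters, that word-level measure *is* the CER here.
def _example_cer_from_counts(substitutions, deletions, insertions, correct):
    return (substitutions + deletions + insertions) / (substitutions + deletions + correct)

assert _example_cer_from_counts(1, 0, 0, 3) == 0.25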
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class EncodecConfig(PretrainedConfig ):
    model_type = '''encodec'''
    def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=24_000 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=128 , num_filters=32 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1024 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ):
        '''simple docstring'''
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}' )
super().__init__(**__SCREAMING_SNAKE_CASE )
    @property
    def chunk_length( self ):
        '''simple docstring'''
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def chunk_stride( self ):
        '''simple docstring'''
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )

    @property
    def frame_rate( self ):
        '''simple docstring'''
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )

    @property
    def num_quantizers( self ):
        '''simple docstring'''
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
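# Sanity check for the derived properties above (a sketch using the default values from
# __init__, not numbers asserted anywhere in the original file): with sampling_rate=24000
# and upsampling_ratios=[8, 5, 4, 2], the hop length is 8 * 5 * 4 * 2 = 320, so
# frame_rate = ceil(24000 / 320) = 75, and with target_bandwidths[-1] = 24.0 the model
# uses int(1000 * 24.0 // (75 * 10)) = 32 quantizers.
assert math.ceil(24_000 / (8 * 5 * 4 * 2)) == 75
assert int(1000 * 24.0 // (75 * 10)) == 32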
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset( datasets.BeamBasedBuilder ):
    '''simple docstring'''

    def _info( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=None , )

    def _split_generators( self , dl_manager , pipeline ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]

    def _build_pcollection( self , pipeline , examples ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset( datasets.BeamBasedBuilder ):
    '''simple docstring'''

    def _info( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=None , )

    def _split_generators( self , dl_manager , pipeline ):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]

    def _build_pcollection( self , pipeline , examples ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples():
    '''simple docstring'''
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def get_test_nested_examples():
    '''simple docstring'''
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class BeamBuilderTest( TestCase ):
    '''simple docstring'''

    @require_beam
    def test_download_and_prepare( self ):
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train.arrow''' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
    @require_beam
    def test_download_and_prepare_sharded( self ):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
    @require_beam
    def test_no_beam_options( self ):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
    @require_beam
    def test_nested_features( self ):
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train.arrow''' ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
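# Note on these tests (an assumption about the environment, not stated in the file itself):
# they need `apache-beam` installed, and `beam_runner="DirectRunner"` runs the Beam pipeline
# locally in-process, which is why no external runner configuration is required. A minimal
# manual run might look like:
#
#     builder = DummyBeamDataset(cache_dir="/tmp/beam_cache", beam_runner="DirectRunner")
#     builder.download_and_prepare()
#     dset = builder.as_dataset()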
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name( class_name ):
    '''simple docstring'''
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(F""".{module_name}""" , "transformers.models" )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , "__name__" , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers" )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_feature_extractor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    '''simple docstring'''
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead." )
        return {}
    with open(resolved_config_file , encoding="utf-8" ) as reader:
        return json.load(reader )
class AutoFeatureExtractor:
def __init__( self ) -> int:
raise EnvironmentError(
"AutoFeatureExtractor is designed to be instantiated "
"using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        config = kwargs.pop("config" , None )
        trust_remote_code = kwargs.pop("trust_remote_code" , None )
        kwargs["_from_auto"] = True
        config_dict , _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs )
        feature_extractor_class = config_dict.get("feature_extractor_type" , None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config , "feature_extractor_type" , None )
            if hasattr(config , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop("code_revision" , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
            return feature_extractor_class.from_dict(config_dict , **kwargs )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
    @staticmethod
    def register( config_class , feature_extractor_class ):
        FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
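# Hedged usage sketch (not part of the original module; the checkpoint name is illustrative):
#
#     from transformers import AutoFeatureExtractor
#
#     feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#
# `from_pretrained` first looks for `feature_extractor_type` in the checkpoint's
# preprocessor config, then falls back to the model config and the static mapping above.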
class Graph:
    def __init__( self ):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex( self , vertex ):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge( self , head , tail , weight ):
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight( self ):
        edges = self.get_edges()
        for edge in edges:
            head , tail , weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head , tail , weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__( self ):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F"""{head} -> {tail} == {weight}\n"""
        return string.rstrip("\n" )

    def get_edges( self ):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output

    def get_vertices( self ):
        return self.adjacency.keys()

    @staticmethod
    def build( vertices=None , edges=None ):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
class UnionFind:
    def __init__( self ):
        self.parent = {}
        self.rank = {}

    def __len__( self ):
        return len(self.parent )

    def make_set( self , item ):
        if item in self.parent:
            return self.find(item )
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find( self , item ):
        if item not in self.parent:
            return self.make_set(item )
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item] )
        return self.parent[item]

    def union( self , item1 , item2 ):
        root1 = self.find(item1 )
        root2 = self.find(item2 )
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None
def boruvka_mst( graph ):
    num_components = graph.num_vertices
    union_find = UnionFind()
    mst_edges = []
    while num_components > 1:
        cheap_edge = {}
        for vertex in graph.get_vertices():
            cheap_edge[vertex] = -1
        edges = graph.get_edges()
        for edge in edges:
            head , tail , weight = edge
            edges.remove((tail, head, weight) )
        for edge in edges:
            head , tail , weight = edge
            set1 = union_find.find(head )
            set2 = union_find.find(tail )
            if set1 != set2:
                if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                    cheap_edge[set1] = [head, tail, weight]
                if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                    cheap_edge[set2] = [head, tail, weight]
        for vertex in cheap_edge:
            if cheap_edge[vertex] != -1:
                head , tail , weight = cheap_edge[vertex]
                if union_find.find(head ) != union_find.find(tail ):
                    union_find.union(head , tail )
                    mst_edges.append(cheap_edge[vertex] )
                    num_components = num_components - 1
    mst = Graph.build(edges=mst_edges )
    return mst
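# Usage sketch for the Borůvka implementation above (a minimal example added for
# illustration, not part of the original file):
if __name__ == "__main__":
    example_graph = Graph.build(edges=[("a", "b", 1), ("b", "c", 2), ("a", "c", 3)])
    example_graph.distinct_weight()  # Borůvka assumes distinct edge weights
    print(boruvka_mst(example_graph))  # expected MST: the a-b and b-c edges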
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys( name ):
    '''simple docstring'''
    if "emb" in name:
        name = name.replace("""emb""" , """model.decoder.embed_tokens""" )
    if "transformer" in name:
        name = name.replace("""transformer""" , """model.decoder""" )
    if "cross_attention" in name:
        name = name.replace("""cross_attention""" , """encoder_attn""" )
    if "linear1" in name:
        name = name.replace("""linear1""" , """fc1""" )
    if "linear2" in name:
        name = name.replace("""linear2""" , """fc2""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """self_attn_layer_norm""" )
    if "norm_cross" in name:
        name = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """final_layer_norm""" )
    if "out_norm" in name:
        name = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
    if "linears" in name:
        name = name.replace("""linears""" , """lm_heads""" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
    return name
def rename_state_dict( state_dict , hidden_size ) -> Tuple[Dict, Dict]:
    '''simple docstring'''
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight" , "q_proj.weight" )] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "k_proj.weight" )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "v_proj.weight" )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint( checkpoint ) -> MusicgenDecoderConfig:
    '''simple docstring'''
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    '''simple docstring'''
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict , enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = T5EncoderModel.from_pretrained("""t5-base""" )
    audio_encoder = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys , unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
    if len(unexpected_keys ) > 0:
        raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("""Incorrect shape for logits""" )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("""t5-base""" )
    feature_extractor = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
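    # Example invocation (a sketch; the script file name is illustrative, the flags are the
    # ones defined by the parser above):
    #
    #     python convert_musicgen_transformers.py --checkpoint small \
    #         --pytorch_dump_folder ./musicgen-small --device cpu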
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VisualQuestionAnsweringPipeline(Pipeline ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )

    def _sanitize_parameters( self , top_k=None , padding=None , truncation=None , **kwargs ):
        preprocess_params , postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self , image , question=None , **kwargs ):
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {'image': image, 'question': question}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results

    def preprocess( self , inputs , padding=False , truncation=False ):
        image = load_image(inputs['image'] )
        model_inputs = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs

    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f'Unsupported framework: {self.framework}' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = tempfile.mkdtemp()
lowercase_ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowercase_ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"""image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowercase_ = os.path.join(self.tmpdirname , lowercase_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowercase_ , lowercase_ )
def lowerCamelCase__ ( self : Dict , **lowercase_ : Tuple ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def lowerCamelCase__ ( self : int , **lowercase_ : Any ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def lowerCamelCase__ ( self : str , **lowercase_ : Optional[Any] ):
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
        lowercase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowercase_ = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = self.get_image_processor()
lowercase_ = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_slow.save_pretrained(self.tmpdirname )
lowercase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
lowercase_ = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_fast.save_pretrained(self.tmpdirname )
lowercase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase_ )
self.assertIsInstance(processor_fast.image_processor , lowercase_ )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase_ = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
lowercase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowercase_ = self.prepare_image_inputs()
lowercase_ = image_processor(lowercase_ , return_tensors="""np""" )
lowercase_ = processor(images=lowercase_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowercase_ = """lower newer"""
lowercase_ = processor(text=lowercase_ )
lowercase_ = tokenizer(lowercase_ , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowercase_ = """lower newer"""
lowercase_ = self.prepare_image_inputs()
lowercase_ = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowercase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase_ = processor.batch_decode(lowercase_ )
lowercase_ = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.get_image_processor()
lowercase_ = self.get_tokenizer()
lowercase_ = AlignProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowercase_ = """lower newer"""
lowercase_ = self.prepare_image_inputs()
lowercase_ = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class _a :
"""simple docstring"""
def __init__( self : Dict , lowercase_ : List[Any] , lowercase_ : Dict=13 , lowercase_ : int=7 , lowercase_ : Optional[Any]=True , lowercase_ : str=True , lowercase_ : List[str]=False , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=99 , lowercase_ : int=64 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Any=64 , lowercase_ : str="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Dict=512 , lowercase_ : Any=16 , lowercase_ : List[str]=2 , lowercase_ : int=0.0_2 , lowercase_ : List[str]=3 , lowercase_ : Tuple=4 , lowercase_ : Union[str, Any]=None , ):
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : List[str] ):
'''simple docstring'''
lowercase_ = MPNetModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ = model(lowercase_ , lowercase_ )
lowercase_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] ):
'''simple docstring'''
lowercase_ = MPNetForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ = model(
lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Any , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.num_labels
lowercase_ = MPNetForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.num_choices
lowercase_ = MPNetForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ = model(
lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : Tuple , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.num_labels
lowercase_ = MPNetForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
((lowercase_) , (lowercase_) , (lowercase_) , (lowercase_) , (lowercase_) , (lowercase_)) = config_and_inputs
lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _a ( __a , __a , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
A_ = (
{
'''feature-extraction''': MPNetModel,
'''fill-mask''': MPNetForMaskedLM,
'''question-answering''': MPNetForQuestionAnswering,
'''text-classification''': MPNetForSequenceClassification,
'''token-classification''': MPNetForTokenClassification,
'''zero-shot''': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = False
A_ = True
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = MPNetModelTester(self )
lowercase_ = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*lowercase_ )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase_ )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase_ )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase_ )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase_ )
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = MPNetModel.from_pretrained("""microsoft/mpnet-base""" )
lowercase_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
lowercase_ = model(lowercase_ )[0]
lowercase_ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase_ )
lowercase_ = torch.tensor(
[[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4 ) )
'''simple docstring'''
from manim import *
class Stage1( Scene ):
"""simple docstring"""
    def construct( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : List[str] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : int = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : int = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = [mem.copy() for i in range(1 )]
SCREAMING_SNAKE_CASE : str = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Optional[int] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
gpu.align_to(__lowerCAmelCase , __lowerCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : List[str] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Dict = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE : Any = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE : Any = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(__lowerCAmelCase , run_time=1 ) , Create(__lowerCAmelCase , run_time=1 ) , Create(__lowerCAmelCase , run_time=1 ) , )
SCREAMING_SNAKE_CASE : str = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
SCREAMING_SNAKE_CASE : Dict = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : List[str] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=2.5 ) , Write(__lowerCAmelCase ) , Write(__lowerCAmelCase ) )
self.add(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : List[Any] = []
for i, rect in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.7 )
cpu_target.move_to(__lowerCAmelCase )
cpu_target.generate_target()
SCREAMING_SNAKE_CASE : List[str] = 0.46 / 4
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=__lowerCAmelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=__lowerCAmelCase , buff=0.0 )
cpu_targs.append(__lowerCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__lowerCAmelCase ) )
second_animations.append(MoveToTarget(__lowerCAmelCase , run_time=1.5 ) )
self.play(*__lowerCAmelCase )
self.play(*__lowerCAmelCase )
self.wait()
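# Rendering note (an assumption about usage, not stated in the file): a Manim scene like
# the one above is typically rendered from the command line, e.g.
#
#     manim -pql this_file.py Stage1
#
# where -p previews the output and -ql selects low quality for fast iteration.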
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
lowerCAmelCase__: Optional[int] = None
lowerCAmelCase__: Tuple = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
lowerCAmelCase__: List[Any] = 45
lowerCAmelCase__: Optional[int] = 1581
lowerCAmelCase__: Any = 1517
lowerCAmelCase__: Any = 1570
lowerCAmelCase__: Union[str, Any] = 1584
lowerCAmelCase__: str = 1793
lowerCAmelCase__: str = 1795
lowerCAmelCase__: Dict = 1916
lowerCAmelCase__: Optional[int] = 1864
lowerCAmelCase__: List[Any] = 1905
lowerCAmelCase__: Union[str, Any] = 1919
lowerCAmelCase__: Union[str, Any] = 2429
lowerCAmelCase__: List[str] = 2208
lowerCAmelCase__: List[str] = 2418
lowerCAmelCase__: Optional[Any] = 2323
lowerCAmelCase__: Dict = 2407
# @@protoc_insertion_point(module_scope)
| 345 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
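# `_import_structure` maps each submodule to the public names it exports. It is
# consumed by `_LazyModule` at the bottom of the file, so heavyweight backends
# (torch, tf, flax) are only imported on first attribute access.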
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 134 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 134 | 1 |
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 549 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
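# Note (worked example, derived from the defaults above): with image_size=30 and
# patch_size=2 the sequence length is (30 // 2) ** 2 + 1 = 226 (225 patches + [CLS]).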
@require_torch
class a ( __snake_case , __snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE : str = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Dict = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : str = False
def UpperCamelCase ( self : List[Any] ) -> List[str]:
lowerCamelCase_ = ViTMSNModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
pass
def UpperCamelCase ( self : List[Any] ) -> List[Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCamelCase ( self : Optional[int] ) -> Any:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def UpperCamelCase ( self : int ) -> List[Any]:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = ViTMSNModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 549 | 1 |
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 708 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCAmelCase__ : Optional[int] = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
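# These are the non-Python sources (CUDA/C++ kernels, Cython modules) that
# setuptools must ship explicitly; the check below fails if any of them was
# dropped from the built release.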
def test_custom_files_are_present(transformers_path):
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCAmelCase__ : str = argparse.ArgumentParser()
parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
lowerCAmelCase__ : Any = parser.parse_args()
if args.check_lib:
lowerCAmelCase__ : int = importlib.import_module("transformers")
lowerCAmelCase__ : Optional[int] = Path(transformers_module.__file__).parent
else:
lowerCAmelCase__ : Optional[int] = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 329 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__lowerCAmelCase = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    """simple docstring"""
    return (preds == labels).mean()
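# Quick sanity check (hand-derived, not part of the original script):
# simple_accuracy(np.array([0, 1, 1]), np.array([0, 0, 1])) -> 2/3 ~= 0.667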
@dataclass
class lowerCamelCase_ :
__lowercase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__lowercase : Optional[str] = field(
default=lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__lowercase : Optional[str] = field(
default=lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__lowercase : Optional[str] = field(
default=lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class lowerCamelCase_ :
__lowercase : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
__lowercase : str = field(metadata={"help": "Should contain the data files for the task."} )
__lowercase : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__lowercase : bool = field(
default=lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
    return results
def _mp_fn(index):
    # entry point for multiprocess spawning (e.g. TPU via xla_spawn)
    main()
if __name__ == "__main__":
main()
| 147 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class lowerCamelCase_ ( lowercase , lowercase , unittest.TestCase ):
__lowercase : str = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__lowercase : Union[str, Any] = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
__lowercase : Tuple = False
__lowercase : Dict = False
__lowercase : Any = False
__lowercase : int = False
__lowercase : Optional[Any] = False
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = TFResNetModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )
def lowercase ( self ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def lowercase ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCamelCase_ )
_UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def lowercase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowercase ( self ) -> Dict:
"""simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def lowercase ( self ) -> List[str]:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFResNetModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 147 | 1 |
"""simple docstring"""
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # convert the angles from degrees to radians
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)
    # Calculate apparent power
    return voltage_rect * current_rect
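# Worked example (hand-derived, not part of the original module):
# apparent_power(100, 5, 0, 0) -> cmath.rect(100, 0) * cmath.rect(5, 0) = (500+0j)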
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        # the fused qkv weight has shape (3 * hidden_size, hidden_size); the slices below are q, k, v
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
snake_case = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 404 | 0 |
def __UpperCAmelCase ( __a : list[int] ,__a : list[int] ) -> None:
"""simple docstring"""
_a : List[Any] = len(__a )
print('''The following activities are selected:''' )
# The first activity is always selected
_a : List[Any] = 0
print(__a ,end=''',''' )
# Consider rest of the activities
for j in range(__a ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(__a ,end=''',''' )
_a : Optional[int] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
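    # With these inputs (activities already sorted by finish time) the greedy
    # scan prints: 0,1,3,4, since each selected activity starts no earlier than
    # the previously selected one finishes.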
| 14 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
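# Illustration with a hypothetical checkpoint key (not taken from a real
# checkpoint): "network.1.3.attn.proj.weight" would be remapped to
# "swiftformer.encoder.network.1.blocks.3.attn.proj.weight".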
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    # NOTE: "depths"/"embed_dims" are assumed to be the SwiftFormerConfig field names for these lists
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
_lowerCAmelCase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_lowerCAmelCase: Optional[int] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20 | 0 |
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # the row just filled becomes "next_row"; start a fresh buffer
        # (a plain alias here would corrupt the diagonal reads of the next pass)
        next_row, current_row = current_row, [0] * (cols + 1)
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
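    # Despite the "area" in the names, every variant above returns the side
    # length of the largest all-ones square, so this demo prints 2 for the
    # 2x2 matrix of ones.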
| 719 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
A_ :Union[str, Any] = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
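    # The asserts above guarantee a well-formed distillation objective: the
    # total loss is the weighted sum
    #   alpha_ce * L_ce + alpha_mlm * L_mlm + alpha_clm * L_clm + alpha_mse * L_mse + alpha_cos * L_cos,
    # so every weight must be non-negative and at least one must be positive.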
def freeze_pos_embeddings(student, args):
    # attribute paths assume the standard RoBERTa/GPT-2 module layout
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embds(student, args):
    # attribute path assumes the standard RoBERTa module layout
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument('--force' ,action='store_true' ,help='Overwrite dump_path if it already exists.' )
parser.add_argument(
'--dump_path' ,type=a_ ,required=a_ ,help='The output directory (log, checkpoints, parameters, etc.)' )
parser.add_argument(
'--data_file' ,type=a_ ,required=a_ ,help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' ,)
parser.add_argument(
'--student_type' ,type=a_ ,choices=['distilbert', 'roberta', 'gpt2'] ,required=a_ ,help='The student type (DistilBERT, RoBERTa).' ,)
parser.add_argument('--student_config' ,type=a_ ,required=a_ ,help='Path to the student configuration.' )
parser.add_argument(
'--student_pretrained_weights' ,default=a_ ,type=a_ ,help='Load student initialization checkpoint.' )
parser.add_argument(
'--teacher_type' ,choices=['bert', 'roberta', 'gpt2'] ,required=a_ ,help='Teacher type (BERT, RoBERTa).' )
parser.add_argument('--teacher_name' ,type=a_ ,required=a_ ,help='The teacher model.' )
parser.add_argument('--temperature' ,default=2.0 ,type=a_ ,help='Temperature for the softmax temperature.' )
parser.add_argument(
'--alpha_ce' ,default=0.5 ,type=a_ ,help='Linear weight for the distillation loss. Must be >=0.' )
parser.add_argument(
'--alpha_mlm' ,default=0.0 ,type=a_ ,help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' ,)
parser.add_argument('--alpha_clm' ,default=0.5 ,type=a_ ,help='Linear weight for the CLM loss. Must be >=0.' )
parser.add_argument('--alpha_mse' ,default=0.0 ,type=a_ ,help='Linear weight of the MSE loss. Must be >=0.' )
parser.add_argument(
'--alpha_cos' ,default=0.0 ,type=a_ ,help='Linear weight of the cosine embedding loss. Must be >=0.' )
parser.add_argument(
'--mlm' ,action='store_true' ,help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
parser.add_argument(
'--mlm_mask_prop' ,default=0.15 ,type=a_ ,help='Proportion of tokens for which we need to make a prediction.' ,)
parser.add_argument('--word_mask' ,default=0.8 ,type=a_ ,help='Proportion of tokens to mask out.' )
parser.add_argument('--word_keep' ,default=0.1 ,type=a_ ,help='Proportion of tokens to keep.' )
parser.add_argument('--word_rand' ,default=0.1 ,type=a_ ,help='Proportion of tokens to randomly replace.' )
parser.add_argument(
'--mlm_smoothing' ,default=0.7 ,type=a_ ,help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' ,)
parser.add_argument('--token_counts' ,type=a_ ,help='The token counts in the data_file for MLM.' )
parser.add_argument(
'--restrict_ce_to_mask' ,action='store_true' ,help='If true, compute the distillation loss only the [MLM] prediction distribution.' ,)
parser.add_argument(
'--freeze_pos_embs' ,action='store_true' ,help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' ,)
parser.add_argument(
'--freeze_token_type_embds' ,action='store_true' ,help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' ,)
parser.add_argument('--n_epoch' ,type=a_ ,default=3 ,help='Number of pass on the whole dataset.' )
parser.add_argument('--batch_size' ,type=a_ ,default=5 ,help='Batch size (for each process).' )
parser.add_argument(
'--group_by_size' ,action='store_false' ,help='If true, group sequences that have similar length into the same batch. Default is true.' ,)
parser.add_argument(
'--gradient_accumulation_steps' ,type=a_ ,default=50 ,help='Gradient accumulation for larger training batches.' ,)
parser.add_argument('--warmup_prop' ,default=0.05 ,type=a_ ,help='Linear warmup proportion.' )
parser.add_argument('--weight_decay' ,default=0.0 ,type=a_ ,help='Weight decay if we apply some.' )
parser.add_argument('--learning_rate' ,default=5e-4 ,type=a_ ,help='The initial learning rate for Adam.' )
parser.add_argument('--adam_epsilon' ,default=1e-6 ,type=a_ ,help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' ,default=5.0 ,type=a_ ,help='Max gradient norm.' )
parser.add_argument('--initializer_range' ,default=0.02 ,type=a_ ,help='Random initialization range.' )
parser.add_argument(
'--fp16' ,action='store_true' ,help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' ,)
parser.add_argument(
'--fp16_opt_level' ,type=a_ ,default='O1' ,help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) ,)
parser.add_argument('--n_gpu' ,type=a_ ,default=1 ,help='Number of GPUs in the node.' )
parser.add_argument('--local_rank' ,type=a_ ,default=-1 ,help='Distributed training - Local rank' )
parser.add_argument('--seed' ,type=a_ ,default=56 ,help='Random seed' )
parser.add_argument('--log_interval' ,type=a_ ,default=500 ,help='Tensorboard logging interval.' )
parser.add_argument('--checkpoint_interval' ,type=a_ ,default=4_000 ,help='Checkpoint interval.' )
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
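# Example invocation (editor addition; the paths below are placeholders and
# the alpha settings respect the constraints enforced by `sanity_checks`):
#
#   python train.py \
#       --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_clm 0.0 --alpha_cos 0.33 \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --dump_path serialization_dir/my_first_training --force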
| 154 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used as a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
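# Usage sketch (editor addition): the behaviour pinned down by the tests
# above, outside the harness. Needs `transformers` installed and access to
# the tiny checkpoint used in the tests:
#
#   classifier = pipeline(
#       task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
#   )
#   classifier("This is great !", top_k=None)  # scores for every label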
| 295 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
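# Usage sketch (editor addition): the processor under test applied directly.
# Requires Pillow and a vision-enabled install; with the default size above,
# torch.Size([1, 3, 18, 20]) is the expected output shape:
#
#   processor = DonutImageProcessor(size={"height": 18, "width": 20})
#   image = Image.new("RGB", (40, 30))
#   processor(image, return_tensors="pt").pixel_values.shape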
| 295 | 1 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
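

# Usage sketch (editor addition): replaying one scripted operation stream
# against both implementations, mirroring the parametrized test above.
if __name__ == "__main__":
    my, py = HashMap(initial_block_size=4), {}
    for fun, *args in _add_items:
        _run_operation(my, fun, *args)
        _run_operation(py, fun, *args)
    assert set(my.items()) == set(py.items())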
| 315 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
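# Usage sketch (editor addition; numbers arbitrary): composing criteria the
# way generation loops consume them. `MaxLengthCriteria` fires here because
# the dummy sequence already holds 25 tokens:
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
#   dummy_ids = torch.zeros((1, 25), dtype=torch.long)
#   criteria(dummy_ids, scores=None)  # True
#   criteria.max_length               # 20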
| 315 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
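# Usage sketch (editor addition): a deliberately small configuration that
# exercises the validation above; `num_key_value_heads` falls back to
# `num_attention_heads` when left unset:
#
#   config = LlamaConfig(
#       vocab_size=1000, hidden_size=128, intermediate_size=256,
#       num_hidden_layers=2, num_attention_heads=4,
#       rope_scaling={"type": "linear", "factor": 2.0},
#   )
#   config.num_key_value_heads  # 4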
| 279 |
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """
    Capitalize the first letter of ``sentence``, leaving the rest unchanged.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 227 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
__lowerCamelCase = """▁"""
class UpperCamelCase_ ( _snake_case ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["""input_ids""", """attention_mask"""]
def __init__( self , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase = None , **lowercase , ) -> str:
# Mask token behave like a normal word, i.e. include the space before it
_a : List[str] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
_a : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
_a : Optional[Any] = vocab_file
_a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case_ ) )
_a : Optional[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_a : Any = len(self.sp_model ) - 1
_a : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def snake_case__( self , lowercase , lowercase = None ) -> Union[str, Any]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : List[Any] = [self.cls_token_id]
_a : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case__( self , lowercase , lowercase = None , lowercase = False ) -> List[Any]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
def snake_case__( self , lowercase , lowercase = None ) -> Optional[int]:
_a : Dict = [self.sep_token_id]
_a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case__( self ) -> Dict:
return len(self.sp_model )
def snake_case__( self ) -> Optional[int]:
_a : List[str] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case__( self , lowercase ) -> Any:
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def snake_case__( self , lowercase ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_a : Optional[int] = self.sp_model.PieceToId(snake_case_ )
return spm_id if spm_id else self.unk_token_id
def snake_case__( self , lowercase ) -> Union[str, Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(snake_case_ )
def snake_case__( self , lowercase ) -> Tuple:
_a : Optional[Any] = []
_a : List[Any] = ""
_a : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
_a : Tuple = True
_a : int = []
else:
current_sub_tokens.append(snake_case_ )
_a : Dict = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __getstate__( self ) -> Dict:
_a : Dict = self.__dict__.copy()
_a : int = None
return state
def __setstate__( self , lowercase ) -> List[Any]:
_a : str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a : List[str] = {}
_a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__( self , lowercase , lowercase = None ) -> Union[str, Any]:
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_a : Any = os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , '''wb''' ) as fi:
_a : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,) | 721 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the timm model's weights into our DeiT structure.
    """

    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
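

# Example invocation (editor addition; assumes this script is saved as
# convert_deit_timm_to_pytorch.py and the dump folder is a placeholder):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224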
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 307 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
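# Sketch (editor addition): the shim keeps old imports working while warning
# on construction, e.g.:
#
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       VideoMAEFeatureExtractor()  # emits the FutureWarning defined above
#       assert any(issubclass(w.category, FutureWarning) for w in caught)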
| 624 |
"""simple docstring"""
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 624 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
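# Usage sketch (editor addition): wiring the early-exit classifier to a
# config; needs the local `modeling_highway_bert` module and is untrained,
# so this only checks bookkeeping:
#
#   config = RobertaConfig(num_labels=2)
#   model = DeeRobertaForSequenceClassification(config)
#   model.num_layers == config.num_hidden_layers  # True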
| 418 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        """Compute the metric for the selected SuperGLUE task."""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 418 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Assign a checkpoint tensor to the HF model attribute addressed by a dotted key."""
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Map every tensor in the fairseq state dict onto the HF UniSpeech model."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Route a `conv_layers.*` tensor into the HF feature extractor, by layer and type id."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
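# Worked example (illustrative): a full_name ending in "conv_layers.0.0.weight" parses
# to layer_id=0, type_id=0, so the tensor lands in
# feature_extractor.conv_layers[0].conv.weight.data; a "conv_layers.0.2.*" name would
# target that layer's layer_norm instead (subject to the group-norm rule above).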
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy the fairseq UniSpeech weights into the Transformers design and save them."""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
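# Example invocation (illustrative; paths are placeholders):
#   python <this_script>.py --checkpoint_path ./unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf --dict_path ./dict.json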
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 201 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)

    >>> resistor_parallel([2, 2])
    1.0
    >>> resistor_parallel([2, -1])
    Traceback (most recent call last):
        ...
    ValueError: Resistor at index 1 has a negative or zero value!
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn

    >>> resistor_series([5, 10, 15])
    30.0
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod() | 201 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding'))
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding'))
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight'))
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias'))
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight'))
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias'))
for i in range(config.vision_config.num_hidden_layers):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight'''))
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias'''))
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight'''))
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias'''))
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight'''))
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',))
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias'''))
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight'''))
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias'''))
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight'''))
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias'''))
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight'))
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias'))
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''')

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'''vision_model.encoder.layers.{i}.self_attn.qkv.bias'''] = qkv_bias
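# Shape sketch (illustrative): with hidden size H, q_bias and v_bias are each (H,),
# so torch.cat((q_bias, zeros, v_bias)) builds a (3H,) bias for the fused qkv
# projection, pinning the key bias at zero as in the original vision encoder.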
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy the original LAVIS BLIP-2 weights into the Transformers design and verify them."""
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b')
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl')
    )
    eos_token_id = tokenizer('\n', add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()

    model_name_to_original = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print('Loading original model...')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device)
    original_model.eval()
    print('Done!')

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if "attention.self" in key:
            key = key.replace('self', 'attention')
        if "opt_proj" in key:
            key = key.replace('opt_proj', 'language_projection')
        if "t5_proj" in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('opt'):
            key = key.replace('opt', 'language')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(device)
    input_ids = tokenizer(['\n'], return_tensors='pt').input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors='pt').pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print('First values of original logits:', original_logits[0, :3, :3])
    print('First values of HF logits:', logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print('Looks ok!')

    print('Generating a caption...')
    prompt = ''
    input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(device)

    original_outputs = original_model.generate({'image': original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print('Original generation:', original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f'''nielsr/{model_name}''')
        hf_model.push_to_hub(f'''nielsr/{model_name}''')
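# Example invocation (illustrative; the script file name is a placeholder):
#   python <this_script>.py --model_name blip2-opt-2.7b --pytorch_dump_folder_path ./blip2-opt-2.7b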
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 702 |
import tensorflow as tf
from ...tf_utils import shape_list
class UpperCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , a , a , a , a , a=1 , a=False , **a ) -> List[str]:
super().__init__(**a )
snake_case_ = vocab_size
snake_case_ = d_embed
snake_case_ = d_proj
snake_case_ = cutoffs + [vocab_size]
snake_case_ = [0] + self.cutoffs
snake_case_ = div_val
snake_case_ = self.cutoffs[0]
snake_case_ = len(self.cutoffs ) - 1
snake_case_ = self.shortlist_size + self.n_clusters
snake_case_ = keep_order
snake_case_ = []
snake_case_ = []
def _UpperCamelCase ( self , a ) -> int:
if self.n_clusters > 0:
snake_case_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=a , name='cluster_weight' )
snake_case_ = self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=a , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
snake_case_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=a , name=F'''out_projs_._{i}''' , )
self.out_projs.append(a )
else:
self.out_projs.append(a )
snake_case_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=a , name=F'''out_layers_._{i}_._weight''' , )
snake_case_ = self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=a , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
snake_case_ , snake_case_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case_ = self.d_embed // (self.div_val**i)
snake_case_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=a , name=F'''out_projs_._{i}''' )
self.out_projs.append(a )
snake_case_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=a , name=F'''out_layers_._{i}_._weight''' , )
snake_case_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=a , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(a )
@staticmethod
def _UpperCamelCase ( a , a , a , a=None ) -> int:
snake_case_ = x
if proj is not None:
snake_case_ = tf.einsum('ibd,ed->ibe' , a , a )
return tf.einsum('ibd,nd->ibn' , a , a ) + b
@staticmethod
def _UpperCamelCase ( a , a ) -> Dict:
snake_case_ = shape_list(a )
snake_case_ = tf.range(lp_size[0] , dtype=target.dtype )
snake_case_ = tf.stack([r, target] , 1 )
return tf.gather_nd(a , a )
def _UpperCamelCase ( self , a , a , a=True , a=False ) -> Optional[int]:
snake_case_ = 0
if self.n_clusters == 0:
snake_case_ = self._logit(a , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
snake_case_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=a , logits=a )
snake_case_ = tf.nn.log_softmax(a , axis=-1 )
else:
snake_case_ = shape_list(a )
snake_case_ = []
snake_case_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
snake_case_ , snake_case_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
snake_case_ = (target >= l_idx) & (target < r_idx)
snake_case_ = tf.where(a )
snake_case_ = tf.boolean_mask(a , a ) - l_idx
if self.div_val == 1:
snake_case_ = self.out_layers[0][0][l_idx:r_idx]
snake_case_ = self.out_layers[0][1][l_idx:r_idx]
else:
snake_case_ = self.out_layers[i][0]
snake_case_ = self.out_layers[i][1]
if i == 0:
snake_case_ = tf.concat([cur_W, self.cluster_weight] , 0 )
snake_case_ = tf.concat([cur_b, self.cluster_bias] , 0 )
snake_case_ = self._logit(a , a , a , self.out_projs[0] )
snake_case_ = tf.nn.log_softmax(a )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
snake_case_ = tf.boolean_mask(a , a )
snake_case_ = self._gather_logprob(a , a )
else:
snake_case_ = self._logit(a , a , a , self.out_projs[i] )
snake_case_ = tf.nn.log_softmax(a )
snake_case_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
snake_case_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(a )
if target is not None:
snake_case_ = tf.boolean_mask(a , a )
snake_case_ = tf.boolean_mask(a , a )
snake_case_ = self._gather_logprob(a , a )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(a , -cur_logprob , shape_list(a ) )
snake_case_ = tf.concat(a , axis=-1 )
if target is not None:
if return_mean:
snake_case_ = tf.reduce_mean(a )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(a )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(a , name=self.name , aggregation='mean' if return_mean else '' )
return out
| 607 | 0 |
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """
    Minimum cost of a top-left to bottom-right path moving only right or down.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
return matrix[-1][-1]
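# Worked trace (illustrative) for [[1, 3, 1], [1, 5, 1], [4, 2, 1]]: the first row
# becomes [1, 4, 5] and the first column [1, 2, 6]; filling the rest row by row gives
# a bottom-right cell of 7, the cheapest right/down path cost.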
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | """simple docstring"""
def kth_permutation(k: int, n: int) -> list:
    """
    Return the k-th (0-indexed) lexicographic permutation of 0, 1, ..., n - 1.

    >>> kth_permutation(0, 3)
    [0, 1, 2]
    >>> kth_permutation(5, 3)
    [2, 1, 0]
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
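# Worked trace (illustrative) for kth_permutation(5, 3): factorials is [1, 2];
# divmod(5, 2) -> (2, 1) picks element 2, then divmod(1, 1) -> (1, 0) picks element 1,
# leaving element 0, i.e. the last permutation [2, 1, 0].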
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 | 0 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 508 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Optimal leaf value reachable from `node_index` of a full binary game tree."""
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
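# Worked example (illustrative): for scores [3, 5, 2, 9] and height 2, the minimizing
# children evaluate to min(3, 5) = 3 and min(2, 9) = 2, so the maximizing root returns
# minimax(0, 0, True, [3, 5, 2, 9], 2) == 3.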
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 508 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ):
A__ : str = LDMTextToImagePipeline
A__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS - {
'''negative_prompt''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
'''prompt_embeds''',
}
A__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
A__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
A__ : List[Any] = False
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_snake_case = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
_snake_case = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
torch.manual_seed(0 )
_snake_case = AutoencoderKL(
block_out_channels=(3_2, 6_4) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , )
torch.manual_seed(0 )
_snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_snake_case = CLIPTextModel(__lowerCamelCase )
_snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_snake_case = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vqvae''': vae,
'''bert''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=0 ):
"""simple docstring"""
if str(__lowerCamelCase ).startswith('''mps''' ):
_snake_case = torch.manual_seed(__lowerCamelCase )
else:
_snake_case = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
_snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = LDMTextToImagePipeline(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
_snake_case = self.get_dummy_inputs(__lowerCamelCase )
_snake_case = pipe(**__lowerCamelCase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_6, 1_6, 3)
_snake_case = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple=torch.floataa , __lowerCamelCase : Optional[int]=0 ):
"""simple docstring"""
_snake_case = torch.manual_seed(__lowerCamelCase )
_snake_case = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 3_2, 3_2) )
_snake_case = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase )
_snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
_snake_case = self.get_inputs(__lowerCamelCase )
_snake_case = pipe(**__lowerCamelCase ).images
_snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 2_5_6, 2_5_6, 3)
_snake_case = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_snake_case = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : int=torch.floataa , __lowerCamelCase : Tuple=0 ):
"""simple docstring"""
_snake_case = torch.manual_seed(__lowerCamelCase )
_snake_case = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 3_2, 3_2) )
_snake_case = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase )
_snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 5_0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_snake_case = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
_snake_case = self.get_inputs(__lowerCamelCase )
_snake_case = pipe(**__lowerCamelCase ).images[0]
_snake_case = load_numpy(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' )
_snake_case = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 103 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__magic_name__ : str = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
__magic_name__ : int = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
__magic_name__ : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def A__ ( A_ ) -> Any:
with open(A_ , "rb" ) as f:
_lowercase = Image.open(A_ )
return im.convert("RGB" )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
UpperCAmelCase__ = field(default=lowerCamelCase__ , metadata={'help': 'A folder containing the training data.'} )
UpperCAmelCase__ = field(default=lowerCamelCase__ , metadata={'help': 'A folder containing the validation data.'} )
UpperCAmelCase__ = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def snake_case ( self : int ):
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
UpperCAmelCase__ = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowerCamelCase__ )} , )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
UpperCAmelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
UpperCAmelCase__ = field(default=lowerCamelCase__ , metadata={'help': 'Name or path of preprocessor config.'} )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def A__ ( A_ ) -> Optional[Any]:
_lowercase = torch.stack([example["pixel_values"] for example in examples] )
_lowercase = torch.tensor([example["labels"] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def A__ ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowercase , _lowercase , _lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowercase , _lowercase , _lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , A_ , A_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowercase = training_args.get_process_log_level()
logger.setLevel(A_ )
transformers.utils.logging.set_verbosity(A_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
_lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
_lowercase = {}
if data_args.train_dir is not None:
_lowercase = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
_lowercase = os.path.join(data_args.validation_dir , "**" )
_lowercase = load_dataset(
"imagefolder" , data_files=A_ , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
_lowercase = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , A_ ) and data_args.train_val_split > 0.0:
_lowercase = dataset["train"].train_test_split(data_args.train_val_split )
_lowercase = split["train"]
_lowercase = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_lowercase = dataset["train"].features["labels"].names
_lowercase , _lowercase = {}, {}
for i, label in enumerate(A_ ):
_lowercase = str(A_ )
_lowercase = label
# Load the accuracy metric from the datasets package
_lowercase = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(A_ ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
_lowercase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(A_ ) , labelaid=A_ , idalabel=A_ , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowercase = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
_lowercase = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_lowercase = image_processor.size["shortest_edge"]
else:
_lowercase = (image_processor.size["height"], image_processor.size["width"])
_lowercase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
_lowercase = Compose(
[
RandomResizedCrop(A_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_lowercase = Compose(
[
Resize(A_ ),
CenterCrop(A_ ),
ToTensor(),
normalize,
] )
def train_transforms(A_ ):
_lowercase = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(A_ ):
_lowercase = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
_lowercase = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(A_ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
_lowercase = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(A_ )
# Initalize our trainer
_lowercase = Trainer(
model=A_ , args=A_ , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=A_ , tokenizer=A_ , data_collator=A_ , )
# Training
if training_args.do_train:
_lowercase = None
if training_args.resume_from_checkpoint is not None:
_lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowercase = last_checkpoint
_lowercase = trainer.train(resume_from_checkpoint=A_ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_lowercase = trainer.evaluate()
trainer.log_metrics("eval" , A_ )
trainer.save_metrics("eval" , A_ )
# Write model card and (optionally) push to hub
_lowercase = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**A_ )
else:
trainer.create_model_card(**A_ )
if __name__ == "__main__":
main()
| 497 | 0 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first `n` lines of each file under `src_dir` to `dest_dir`, keeping file names."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
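# Example invocation via fire (illustrative; the script name is a placeholder):
#   python minify.py ./tests ./tests_mini 5
# writes the first five lines of every file in ./tests to ./tests_mini.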
if __name__ == "__main__":
fire.Fire(minify)
| 203 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """Return the env-configured level (DATASETS_VERBOSITY) if valid, else the default."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level of the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level of the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library's log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library's log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
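# Usage sketch (illustrative): launching a program with DATASETS_VERBOSITY=error makes
# _get_default_logging_level() return logging.ERROR at import time, silencing the
# library's warnings until set_verbosity() is called explicitly.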
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
_tqdm_active = True
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
| 203 | 1 |
import tensorflow as tf

from ...tf_utils import shape_list


class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)

                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)

                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
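# --- Added usage sketch (illustrative, not part of the original module). ---
# Assumed shapes/values: with vocab_size=10 and cutoffs=[4], tokens 0-3 form the
# shortlist head and tokens 4-9 a single tail cluster, so the head softmax carries
# 4 + 1 entries and `call` returns [seq_len, batch, vocab_size] log-probabilities.
#
#   layer = TFAdaptiveSoftmaxMask(vocab_size=10, d_embed=8, d_proj=8, cutoffs=[4], div_val=2)
#   hidden = tf.random.normal((5, 2, 8))                          # [seq_len, batch, d_proj]
#   target = tf.random.uniform((5, 2), maxval=10, dtype=tf.int64)
#   logprob = layer(hidden, target)                               # also registers the NLL via add_loss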
| 324 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional

from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
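# --- Hypothetical invocation sketch (added; not part of the original file). ---
# The flags mirror the arguments registered in `register_subcommand` above; the model
# name is an arbitrary example.
#
#   transformers-cli serve --task text-classification --model distilbert-base-uncased --port 8888
#   curl -s -X POST localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'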
| 324 | 1 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 100 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ :
# setable values
lowercase__ = None
lowercase__ = None
lowercase__ = None # sigma(t_i)
@classmethod
def _UpperCAmelCase ( cls : Any):
"""simple docstring"""
return cls()
@dataclass
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ):
@property
def _UpperCAmelCase ( self : int):
"""simple docstring"""
return True
@register_to_config
def __init__( self : Union[str, Any] , lowerCAmelCase_ : float = 0.02 , lowerCAmelCase_ : float = 1_0_0 , lowerCAmelCase_ : float = 1.007 , lowerCAmelCase_ : float = 8_0 , lowerCAmelCase_ : float = 0.05 , lowerCAmelCase_ : float = 5_0 , ):
"""simple docstring"""
pass
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return KarrasVeSchedulerState.create()
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple = ()):
"""simple docstring"""
lowercase_ = jnp.arange(0 , lowerCAmelCase_)[::-1].copy()
lowercase_ = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=lowerCAmelCase_ , schedule=jnp.array(lowerCAmelCase_ , dtype=jnp.floataa) , timesteps=lowerCAmelCase_ , )
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : random.KeyArray , ):
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
lowercase_ = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1)
else:
lowercase_ = 0
# sample eps ~ N(0, S_noise^2 * I)
lowercase_ = random.split(lowerCAmelCase_ , num=1)
lowercase_ = self.config.s_noise * random.normal(key=lowerCAmelCase_ , shape=sample.shape)
lowercase_ = sigma + gamma * sigma
lowercase_ = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ):
"""simple docstring"""
lowercase_ = sample_hat + sigma_hat * model_output
lowercase_ = (sample_hat - pred_original_sample) / sigma_hat
lowercase_ = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ):
"""simple docstring"""
lowercase_ = sample_prev + sigma_prev * model_output
lowercase_ = (sample_prev - pred_original_sample) / sigma_prev
lowercase_ = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
raise NotImplementedError()
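# --- Added sampling-loop sketch (illustration only; `denoise` is a stand-in for a
# user-supplied model and the indexing convention is an assumption). ---
#
#   scheduler = FlaxKarrasVeScheduler()
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   key = random.PRNGKey(0)
#   for t in state.timesteps:
#       sigma = state.schedule[t]
#       sigma_prev = state.schedule[t - 1] if t > 0 else 0.0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#       model_output = denoise(sample_hat, sigma_hat)
#       sample, derivative, state = scheduler.step(
#           state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=False
#       )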
| 100 | 1 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 662 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth first search to fix a finish order; cycles do not affect the sorting order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth first search on the reversed graph to collect one strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the list of strongly connected components of `graph` (Kosaraju's algorithm)."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
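# --- Added example runs on the module-level test graphs above. ---
# Kosaraju's two passes: `topology_sort` fixes a finish order on the graph, then
# `find_components` walks the reversed graph in reverse finish order.
#
#   strongly_connected_components(test_graph_1)  # -> [[0, 1, 2], [3], [4]]
#   strongly_connected_components(test_graph_2)  # -> [[0, 2, 1], [3, 5, 4]]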
| 445 | 0 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
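# --- Added sketch of how these fixtures compose (illustrative, not part of the
# original test file). ---
#
#   test = TFMobileBertModelTest("test_config")
#   test.setUp()
#   config, inputs_dict = test.model_tester.prepare_config_and_inputs_for_common()
#   # every test_* method above feeds the same (config, inputs) pair through a model class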
| 700 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
UpperCamelCase__ = text_generator("""This is a test""" , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
UpperCamelCase__ = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
__lowerCAmelCase , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
UpperCamelCase__ = text_generator("""This is a test""" , do_sample=__lowerCAmelCase , num_return_sequences=2 , return_tensors=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = text_generator.model.config.eos_token_id
UpperCamelCase__ = """<pad>"""
UpperCamelCase__ = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=__lowerCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowerCAmelCase , )
self.assertEqual(
__lowerCAmelCase , [
[
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
],
[
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
],
] , )
@require_tf
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
UpperCamelCase__ = text_generator("""This is a test""" , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
UpperCamelCase__ = text_generator(["""This is a test""", """This is a second test"""] , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
UpperCamelCase__ = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
[{"""generated_text""": ANY(__lowerCAmelCase )}, {"""generated_text""": ANY(__lowerCAmelCase )}],
[{"""generated_text""": ANY(__lowerCAmelCase )}, {"""generated_text""": ANY(__lowerCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
UpperCamelCase__ = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
[{"""generated_text""": ANY(__lowerCAmelCase )}, {"""generated_text""": ANY(__lowerCAmelCase )}],
[{"""generated_text""": ANY(__lowerCAmelCase )}, {"""generated_text""": ANY(__lowerCAmelCase )}],
] , )
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = text_generator("""test""" , return_full_text=__lowerCAmelCase , return_text=__lowerCAmelCase )
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = text_generator("""test""" , return_full_text=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = text_generator("""test""" , return_text=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
UpperCamelCase__ = text_generator("""""" )
self.assertEqual(__lowerCAmelCase , [{"""generated_text""": ANY(__lowerCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
UpperCamelCase__ = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
UpperCamelCase__ = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 500 , max_new_tokens=20 )
UpperCamelCase__ = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__lowerCAmelCase ):
text_generator(
"""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
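# --- Added usage sketch mirroring the behaviour asserted above (names and output are
# illustrative, not a transcript). ---
#
#   generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
#   generator("Hello I believe in", do_sample=False, max_new_tokens=5)
#   # -> [{"generated_text": "Hello I believe in ..."}]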
| 548 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
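# --- Added sketch of the lazy-import behaviour (illustrative). ---
# Importing the package is cheap; torch-dependent symbols are resolved only on first
# attribute access.
#
#   from transformers.models import bridgetower   # no torch import happens here
#   bridgetower.BridgeTowerConfig                  # pulled from configuration_bridgetower
#   bridgetower.BridgeTowerModel                   # triggers the modeling_bridgetower import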
| 321 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 148 | 0 |
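The `if TYPE_CHECKING:` half of the file mirrors `_import_structure` so static analyzers see real imports while the runtime defers everything to `_LazyModule`. A hedged sketch of that split, with a hypothetical heavy module standing in for the real submodules:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # only static type checkers execute this import; the runtime never pays for it
    from mypackage.heavy_module import HeavyModel  # hypothetical module

def describe(model: "HeavyModel") -> str:
    # the string annotation keeps the runtime free of the heavy import
    return type(model).__name__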
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE_ : str ='''huggingface/label-files'''
SCREAMING_SNAKE_CASE_ : Dict ='''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE_ : Optional[int] =json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='''dataset''' ) , '''r''' ) )
    SCREAMING_SNAKE_CASE_ : str ={int(k): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ : int ={v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ : Dict ='''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE_ : List[str] =BitConfig(
conv_layer=UpperCAmelCase_ , num_labels=1_0_0_0 , idalabel=UpperCAmelCase_ , labelaid=UpperCAmelCase_ , )
return config
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : List[Any] ) -> Dict:
if "stem.conv" in name:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE_ : Tuple =name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE_ : Optional[Any] ='''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE_ : Dict ='''bit.encoder.''' + name
return name
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ : str ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] =Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]=False ) -> Dict:
SCREAMING_SNAKE_CASE_ : Any =get_config(UpperCAmelCase_ )
# load original model from timm
SCREAMING_SNAKE_CASE_ : List[Any] =create_model(UpperCAmelCase_ , pretrained=UpperCAmelCase_ )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE_ : Any =timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE_ : str =state_dict.pop(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] =val.squeeze() if '''head''' in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE_ : Tuple =BitForImageClassification(UpperCAmelCase_ )
model.eval()
model.load_state_dict(UpperCAmelCase_ )
# create image processor
SCREAMING_SNAKE_CASE_ : Any =create_transform(**resolve_data_config({} , model=UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE_ : str =transform.transforms
SCREAMING_SNAKE_CASE_ : str ={
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
    SCREAMING_SNAKE_CASE_ : Dict =BitImageProcessor(
        do_resize=UpperCAmelCase_ ,
        size={'''shortest_edge''': timm_transforms[0].size} ,
        resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,
        do_center_crop=UpperCAmelCase_ ,
        crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,
        do_normalize=UpperCAmelCase_ ,
        image_mean=timm_transforms[-1].mean.tolist() ,
        image_std=timm_transforms[-1].std.tolist() ,
    )
SCREAMING_SNAKE_CASE_ : int =prepare_img()
SCREAMING_SNAKE_CASE_ : int =transform(UpperCAmelCase_ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] =processor(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] =model(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Dict =outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE_ : Dict =timm_model(UpperCAmelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCAmelCase_ , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
if push_to_hub:
print(f'Pushing model {model_name} and processor to the hub' )
model.push_to_hub(f'ybelkada/{model_name}' )
processor.push_to_hub(f'ybelkada/{model_name}' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
_lowercase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 431 |
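The renaming helper in the conversion script is hard to follow through the scrambled assignment targets; here is a de-obfuscated, runnable restatement of exactly the timm-to-HF rules written above (nothing added beyond the rules themselves):

def rename_key(name: str) -> str:
    # map timm BiT parameter names onto the HF BitForImageClassification layout
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name

assert rename_key("stem.conv.weight") == "bit.embedder.convolution.weight"
assert rename_key("blocks.0.norm1.weight") == "bit.encoder.layers.0.norm1.weight"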
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowercase = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) -> List[Any]:
return (preds == labels).mean()
@dataclass
class lowercase_ :
__lowerCamelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__lowerCamelCase = field(
default=A , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__lowerCamelCase = field(
default=A , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__lowerCamelCase = field(
default=A , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class lowercase_ :
__lowerCamelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
__lowerCamelCase = field(metadata={"help": "Should contain the data files for the task."} )
__lowerCamelCase = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__lowerCamelCase = field(
default=A , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_ : Optional[Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , UpperCAmelCase_ )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE_ : List[Any] =processors[data_args.task_name]()
SCREAMING_SNAKE_CASE_ : Optional[Any] =processor.get_labels()
SCREAMING_SNAKE_CASE_ : int =len(UpperCAmelCase_ )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_ : Optional[Any] =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE_ : List[str] =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE_ : int =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , )
# Get datasets
SCREAMING_SNAKE_CASE_ : List[Any] =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(UpperCAmelCase_ : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE_ : Tuple =np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(UpperCAmelCase_ , p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE_ : Dict =DataCollatorWithPadding(UpperCAmelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE_ : str =Trainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , compute_metrics=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE_ : Dict ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE_ : Dict =trainer.evaluate()
SCREAMING_SNAKE_CASE_ : Any =os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(UpperCAmelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , UpperCAmelCase_ , UpperCAmelCase_ )
writer.write('''%s = %s\n''' % (key, value) )
results.update(UpperCAmelCase_ )
return results
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : str ) -> Union[str, Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 431 | 1 |
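The `compute_metrics` closure in the script reduces to an argmax over the logits followed by a mean over exact matches; a self-contained numpy check of that reduction:

import numpy as np

def simple_accuracy(preds: np.ndarray, labels: np.ndarray) -> float:
    # fraction of positions where the prediction equals the gold label
    return float((preds == labels).mean())

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
labels = np.array([1, 0, 0])
preds = np.argmax(logits, axis=1)  # -> [1, 0, 1]
print(simple_accuracy(preds, labels))  # 0.666...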
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple=False ):
'''simple docstring'''
try:
_lowerCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_lowerCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_lowerCAmelCase = strtobool(SCREAMING_SNAKE_CASE_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'''If set, {key} must be yes or no.''' )
return _value
_SCREAMING_SNAKE_CASE = parse_flag_from_env("RUN_SLOW", default=False)
_SCREAMING_SNAKE_CASE = parse_flag_from_env("RUN_REMOTE", default=False)
_SCREAMING_SNAKE_CASE = parse_flag_from_env("RUN_LOCAL", default=True)
_SCREAMING_SNAKE_CASE = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
_SCREAMING_SNAKE_CASE = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
_SCREAMING_SNAKE_CASE = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
_SCREAMING_SNAKE_CASE = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
_SCREAMING_SNAKE_CASE = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
_SCREAMING_SNAKE_CASE = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
_SCREAMING_SNAKE_CASE = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("0.3.2"),
reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
_SCREAMING_SNAKE_CASE = pytest.mark.skipif(
sys.platform == "win32",
reason="test should not be run on Windows",
)
def __a(SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
_lowerCAmelCase = unittest.skip("test requires faiss" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __a(SCREAMING_SNAKE_CASE_ : Any ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
_lowerCAmelCase = unittest.skip("test requires regex" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
_lowerCAmelCase = unittest.skip("test requires elasticsearch" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
_lowerCAmelCase = unittest.skip("test requires sqlalchemy" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __a(SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
_lowerCAmelCase = unittest.skip("test requires PyTorch" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] ):
'''simple docstring'''
if not config.TF_AVAILABLE:
_lowerCAmelCase = unittest.skip("test requires TensorFlow" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __a(SCREAMING_SNAKE_CASE_ : Tuple ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
_lowerCAmelCase = unittest.skip("test requires JAX" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __a(SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
_lowerCAmelCase = unittest.skip("test requires Pillow" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __a(SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def __a(SCREAMING_SNAKE_CASE_ : Any ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def __a(SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ):
'''simple docstring'''
def _require_spacy_model(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
try:
import spacy # noqa F401
spacy.load(SCREAMING_SNAKE_CASE_ )
except ImportError:
return unittest.skip("test requires spacy" )(SCREAMING_SNAKE_CASE_ )
except OSError:
return unittest.skip("test requires spacy model '{}'".format(SCREAMING_SNAKE_CASE_ ) )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
return _require_spacy_model
def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ):
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
_lowerCAmelCase = unittest.skip("test is slow" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __a(SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
_lowerCAmelCase = unittest.skip("test is local" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] ):
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
_lowerCAmelCase = unittest.skip("test is packaged" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __a(SCREAMING_SNAKE_CASE_ : Any ):
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
_lowerCAmelCase = unittest.skip("test requires remote" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] ):
'''simple docstring'''
def decorate(cls : Any ):
for name, fn in cls.__dict__.items():
if callable(SCREAMING_SNAKE_CASE_ ) and name.startswith("test" ):
for decorator in decorators:
_lowerCAmelCase = decorator(SCREAMING_SNAKE_CASE_ )
setattr(cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return cls
return decorate
class lowerCAmelCase_ ( __magic_name__ ):
pass
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Any = 0
__lowerCamelCase : Any = 1
__lowerCamelCase : Optional[int] = 2
@contextmanager
def __a(SCREAMING_SNAKE_CASE_ : List[str]=OfflineSimulationMode.CONNECTION_FAILS , SCREAMING_SNAKE_CASE_ : Dict=1e-16 ):
'''simple docstring'''
_lowerCAmelCase = requests.Session().request
def timeout_request(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
# Change the url to an invalid url so that the connection hangs
_lowerCAmelCase = "https://10.255.255.1"
if kwargs.get("timeout" ) is None:
raise RequestWouldHangIndefinitelyError(
F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
_lowerCAmelCase = timeout
try:
return online_request(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_lowerCAmelCase = url
_lowerCAmelCase = e.args[0]
_lowerCAmelCase = (max_retry_error.args[0].replace("10.255.255.1" , F'''OfflineMock[{url}]''' ),)
_lowerCAmelCase = (max_retry_error,)
raise
def raise_connection_error(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : int ):
raise requests.ConnectionError("Offline mode is enabled." , request=SCREAMING_SNAKE_CASE_ )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("requests.Session.send" , SCREAMING_SNAKE_CASE_ ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("requests.Session.request" , SCREAMING_SNAKE_CASE_ ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ ):
yield
else:
raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) as tmp_dir:
try:
os.chdir(SCREAMING_SNAKE_CASE_ )
yield
finally:
os.chdir(SCREAMING_SNAKE_CASE_ )
@contextmanager
def __a():
'''simple docstring'''
import gc
gc.collect()
_lowerCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __a():
'''simple docstring'''
import gc
gc.collect()
_lowerCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple ):
'''simple docstring'''
return deepcopy(SCREAMING_SNAKE_CASE_ ).integers(0 , 100 , 10 ).tolist() == deepcopy(SCREAMING_SNAKE_CASE_ ).integers(0 , 100 , 10 ).tolist()
def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(SCREAMING_SNAKE_CASE_ : Dict , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
try:
return func(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
except HTTPError as err:
if str(SCREAMING_SNAKE_CASE_ ).startswith("500" ) or str(SCREAMING_SNAKE_CASE_ ).startswith("502" ):
pytest.xfail(str(SCREAMING_SNAKE_CASE_ ) )
raise err
return decorator.decorator(_wrapper , SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = returncode
_lowerCAmelCase = stdout
_lowerCAmelCase = stderr
async def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
while True:
_lowerCAmelCase = await stream.readline()
if line:
callback(SCREAMING_SNAKE_CASE_ )
else:
break
async def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : int=False , SCREAMING_SNAKE_CASE_ : Tuple=False ):
'''simple docstring'''
if echo:
print("\nRunning: " , " ".join(SCREAMING_SNAKE_CASE_ ) )
_lowerCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=SCREAMING_SNAKE_CASE_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=SCREAMING_SNAKE_CASE_ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_lowerCAmelCase = []
_lowerCAmelCase = []
def tee(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int="" ):
_lowerCAmelCase = line.decode("utf-8" ).rstrip()
sink.append(SCREAMING_SNAKE_CASE_ )
if not quiet:
print(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , file=SCREAMING_SNAKE_CASE_ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda SCREAMING_SNAKE_CASE_ : tee(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sys.stdout , label="stdout:" ) ),
_read_stream(p.stderr , lambda SCREAMING_SNAKE_CASE_ : tee(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sys.stderr , label="stderr:" ) ),
] , timeout=SCREAMING_SNAKE_CASE_ , )
return _RunOutput(await p.wait() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : str=180 , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : int=True ):
'''simple docstring'''
_lowerCAmelCase = asyncio.get_event_loop()
_lowerCAmelCase = loop.run_until_complete(
_stream_subprocess(SCREAMING_SNAKE_CASE_ , env=SCREAMING_SNAKE_CASE_ , stdin=SCREAMING_SNAKE_CASE_ , timeout=SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ , echo=SCREAMING_SNAKE_CASE_ ) )
_lowerCAmelCase = " ".join(SCREAMING_SNAKE_CASE_ )
if result.returncode > 0:
_lowerCAmelCase = "\n".join(result.stderr )
raise RuntimeError(
F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
F'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' )
return result
def __a():
'''simple docstring'''
_lowerCAmelCase = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" )
_lowerCAmelCase = re.sub(R"^gw" , "" , SCREAMING_SNAKE_CASE_ , 0 , re.M )
return int(SCREAMING_SNAKE_CASE_ )
def __a():
'''simple docstring'''
_lowerCAmelCase = 29500
_lowerCAmelCase = pytest_xdist_worker_id()
return port + uniq_delta
| 18 |
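`distutils.util.strtobool`, which the flag parser above relies on, is deprecated and removed with distutils in Python 3.12, so a self-contained replacement is worth having. A sketch that keeps the same yes/no semantics:

import os

def parse_flag_from_env(key: str, default: bool = False) -> bool:
    value = os.environ.get(key)
    if value is None:
        return default  # key unset: fall back to the default
    if value.lower() in ("y", "yes", "t", "true", "on", "1"):
        return True
    if value.lower() in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"If set, {key} must be yes or no.")

_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)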
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase =logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase =["input_features", "attention_mask"]
def __init__( self , snake_case=8_0 , snake_case=1_6_0_0_0 , snake_case=8_0 , snake_case=0.0 , snake_case=True , snake_case=True , snake_case=True , **snake_case , ) -> str:
'''simple docstring'''
super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case)
_UpperCAmelCase : Optional[Any] =num_mel_bins
_UpperCAmelCase : Optional[int] =do_ceptral_normalize
_UpperCAmelCase : Optional[Any] =normalize_means
_UpperCAmelCase : Optional[Any] =normalize_vars
_UpperCAmelCase : Tuple =True
def lowerCAmelCase ( self , snake_case , ) -> np.ndarray:
'''simple docstring'''
_UpperCAmelCase : List[Any] =waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
_UpperCAmelCase : Dict =torch.from_numpy(snake_case).unsqueeze(0)
_UpperCAmelCase : Union[str, Any] =ta_kaldi.fbank(snake_case , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate)
return features.numpy()
@staticmethod
def lowerCAmelCase ( snake_case , snake_case , snake_case = True , snake_case = True , snake_case = 0.0 , ) -> np.ndarray:
'''simple docstring'''
# make sure we normalize float32 arrays
if normalize_means:
_UpperCAmelCase : int =x[:input_length].mean(axis=0)
_UpperCAmelCase : List[Any] =np.subtract(snake_case , snake_case)
if normalize_vars:
_UpperCAmelCase : List[Any] =x[:input_length].std(axis=0)
_UpperCAmelCase : Optional[int] =np.divide(snake_case , snake_case)
if input_length < x.shape[0]:
_UpperCAmelCase : Dict =padding_value
# make sure array is in float32
_UpperCAmelCase : str =x.astype(np.floataa)
return x
def lowerCAmelCase ( self , snake_case , snake_case = None) -> List[np.ndarray]:
'''simple docstring'''
_UpperCAmelCase : str =attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(snake_case , snake_case , self.normalize_means , self.normalize_vars , self.padding_value)
for x, n in zip(snake_case , snake_case)
]
def __call__( self , snake_case , snake_case = False , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
_UpperCAmelCase : Optional[int] =isinstance(snake_case , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
_UpperCAmelCase : List[Any] =is_batched_numpy or (
isinstance(snake_case , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
_UpperCAmelCase : int =[np.asarray(snake_case , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case , np.ndarray):
_UpperCAmelCase : Tuple =np.asarray(snake_case , dtype=np.floataa)
elif isinstance(snake_case , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
_UpperCAmelCase : int =raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
_UpperCAmelCase : Dict =[raw_speech]
# extract fbank features
_UpperCAmelCase : Optional[Any] =[self._extract_fbank_features(snake_case) for waveform in raw_speech]
# convert into correct format for padding
_UpperCAmelCase : List[str] =BatchFeature({'input_features': features})
_UpperCAmelCase : Any =self.pad(
snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , )
# make sure list is in array format
_UpperCAmelCase : Dict =padded_inputs.get('input_features')
if isinstance(input_features[0] , snake_case):
_UpperCAmelCase : Any =[np.asarray(snake_case , dtype=np.floataa) for feature in input_features]
_UpperCAmelCase : int =padded_inputs.get('attention_mask')
if attention_mask is not None:
_UpperCAmelCase : Tuple =[np.asarray(snake_case , dtype=np.intaa) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
_UpperCAmelCase : Optional[Any] =(
np.array(snake_case , dtype=np.intaa)
if self._get_padding_strategies(snake_case , max_length=snake_case) is not PaddingStrategy.DO_NOT_PAD
else None
)
_UpperCAmelCase : Optional[int] =self.normalize(
padded_inputs['input_features'] , attention_mask=snake_case)
if return_tensors is not None:
_UpperCAmelCase : List[Any] =padded_inputs.convert_to_tensors(snake_case)
return padded_inputs
| 446 | 0 |
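The static CMVN helper above normalizes each utterance by its own mean and standard deviation, then re-zeroes the padded tail so padding frames stay at `padding_value`. A numpy-only sketch of the same steps:

import numpy as np

def utterance_cmvn(x: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    mean = x[:input_length].mean(axis=0)   # per-feature mean over real frames
    x = np.subtract(x, mean)
    std = x[:input_length].std(axis=0)     # the original divides directly, no epsilon
    x = np.divide(x, std)
    if input_length < x.shape[0]:
        x[input_length:] = padding_value   # reset padded frames
    return x.astype(np.float32)

features = np.random.randn(10, 80).astype(np.float32)  # 10 frames, 80 mel bins
normalized = utterance_cmvn(features, input_length=8)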
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_a : List[str] = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
__lowercase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""})
__lowercase : Optional[str] = field(
default=__UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
__lowercase : Optional[str] = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""})
__lowercase : Optional[str] = field(
default=__UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
__lowercase : bool = field(default=__UpperCAmelCase , metadata={"""help""": """Set this flag to use fast tokenization."""})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__lowercase : Optional[str] = field(
default=__UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class _UpperCAmelCase :
__lowercase : str = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""})
__lowercase : Optional[str] = field(
default=__UpperCAmelCase , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
__lowercase : int = field(
default=1_2_8 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__lowercase : bool = field(
default=__UpperCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
def a__ ( ):
"""simple docstring"""
_snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_snake_case , _snake_case , _snake_case : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_snake_case , _snake_case , _snake_case : str = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
" --overwrite_output_dir to overcome." )
_snake_case : List[str] = import_module("tasks" )
try:
_snake_case : Optional[Any] = getattr(_lowerCAmelCase , model_args.task_type )
_snake_case : List[str] = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , _lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
_snake_case : Union[str, Any] = token_classification_task.get_labels(data_args.labels )
_snake_case : List[Any] = dict(enumerate(_lowerCAmelCase ) )
_snake_case : Optional[Any] = len(_lowerCAmelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCAmelCase , idalabel=_lowerCAmelCase , labelaid={label: i for i, label in enumerate(_lowerCAmelCase )} , cache_dir=model_args.cache_dir , )
_snake_case : Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
_snake_case : Any = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
_snake_case : Tuple = (
TokenClassificationDataset(
token_classification_task=_lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=_lowerCAmelCase , labels=_lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_snake_case : List[str] = (
TokenClassificationDataset(
token_classification_task=_lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=_lowerCAmelCase , labels=_lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(a : str , a : int ) -> Tuple[List[int], List[int]]:
_snake_case : Any = np.argmax(_lowerCAmelCase , axis=2 )
_snake_case , _snake_case : int = preds.shape
_snake_case : List[Any] = [[] for _ in range(_lowerCAmelCase )]
_snake_case : List[Any] = [[] for _ in range(_lowerCAmelCase )]
for i in range(_lowerCAmelCase ):
for j in range(_lowerCAmelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(a : str ) -> Dict:
_snake_case , _snake_case : str = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(_lowerCAmelCase , _lowerCAmelCase ),
"precision": precision_score(_lowerCAmelCase , _lowerCAmelCase ),
"recall": recall_score(_lowerCAmelCase , _lowerCAmelCase ),
"f1": fa_score(_lowerCAmelCase , _lowerCAmelCase ),
}
# Data collator
_snake_case : int = DataCollatorWithPadding(_lowerCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_snake_case : Optional[int] = Trainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , compute_metrics=_lowerCAmelCase , data_collator=_lowerCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_snake_case : Tuple = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_snake_case : List[Any] = trainer.evaluate()
_snake_case : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_process_zero():
with open(_lowerCAmelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , _lowerCAmelCase , _lowerCAmelCase )
writer.write("%s = %s\n" % (key, value) )
results.update(_lowerCAmelCase )
# Predict
if training_args.do_predict:
_snake_case : Dict = TokenClassificationDataset(
token_classification_task=_lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=_lowerCAmelCase , labels=_lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
_snake_case , _snake_case , _snake_case : List[str] = trainer.predict(_lowerCAmelCase )
_snake_case , _snake_case : List[Any] = align_predictions(_lowerCAmelCase , _lowerCAmelCase )
_snake_case : int = os.path.join(training_args.output_dir , "test_results.txt" )
if trainer.is_world_process_zero():
with open(_lowerCAmelCase , "w" ) as writer:
for key, value in metrics.items():
logger.info(" %s = %s" , _lowerCAmelCase , _lowerCAmelCase )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
_snake_case : Optional[int] = os.path.join(training_args.output_dir , "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(_lowerCAmelCase , "w" ) as writer:
with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
token_classification_task.write_predictions_to_file(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return results
def a__ ( a : List[Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 700 |
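The alignment step is the crux of token-classification evaluation: positions labelled with the loss's `ignore_index` (-100 for `nn.CrossEntropyLoss`) carry no gold label and must be dropped before seqeval scoring. A de-obfuscated restatement of the logic above:

import numpy as np

def align_predictions(predictions, label_ids, label_map, ignore_index=-100):
    # predictions: (batch, seq_len, num_labels); label_ids: (batch, seq_len)
    preds = np.argmax(predictions, axis=2)
    batch_size, seq_len = preds.shape
    out_label_list = [[] for _ in range(batch_size)]
    preds_list = [[] for _ in range(batch_size)]
    for i in range(batch_size):
        for j in range(seq_len):
            if label_ids[i, j] != ignore_index:  # skip special/padded positions
                out_label_list[i].append(label_map[label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    return preds_list, out_label_list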
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _UpperCAmelCase ( unittest.TestCase):
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_snake_case : List[Any] = Vector()
def lowerCamelCase__ ( self ):
_snake_case : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(snake_case_ ) , "(0,0,0,0,0,1)" )
def lowerCamelCase__ ( self ):
_snake_case : Dict = Vector([1, 2, 3, 4] )
self.assertEqual(len(snake_case_ ) , 4 )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Vector([1, 2] )
_snake_case : List[str] = Vector([1, 2, 3, 4, 5] )
_snake_case : List[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_snake_case : Any = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Vector([1, 2, 3] )
_snake_case : Any = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def lowerCamelCase__ ( self ):
_snake_case : str = Vector([1, 2, 3] )
_snake_case : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def lowerCamelCase__ ( self ):
_snake_case : Optional[int] = Vector([1, 2, 3] )
_snake_case : List[Any] = Vector([2, -1, 4] ) # for test of dot product
_snake_case : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
self.assertEqual((a * b) , 0 )
def lowerCamelCase__ ( self ):
self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
def lowerCamelCase__ ( self ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
def lowerCamelCase__ ( self ):
_snake_case : Tuple = Vector([1, 2, 3] )
_snake_case : Optional[Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , snake_case_ , snake_case_ ) ) , "(3,4,7)" )
def lowerCamelCase__ ( self ):
_snake_case : Union[str, Any] = Vector([1, 0, 0, 0, 0, 0] )
_snake_case : Optional[int] = x.copy()
self.assertEqual(str(snake_case_ ) , str(snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : Dict = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(snake_case_ ) , "(0,1,0)" )
def lowerCamelCase__ ( self ):
_snake_case : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : str = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(snake_case_ , snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : Optional[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(snake_case_ , snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def lowerCamelCase__ ( self ):
_snake_case : str = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_snake_case : List[str] = Vector([1, 2, 3] )
self.assertEqual("(14,32,50)" , str(a * x ) )
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
def lowerCamelCase__ ( self ):
_snake_case : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def lowerCamelCase__ ( self ):
_snake_case : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
def lowerCamelCase__ ( self ):
_snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
def lowerCamelCase__ ( self ):
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 87 | 0 |
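The length expectations in those tests follow directly from the Euclidean norm; a quick stdlib check of the constants used above:

import math

def euclidean_length(components):
    return math.sqrt(sum(c * c for c in components))

assert round(euclidean_length([1, 2]), 3) == 2.236                        # sqrt(5)
assert round(euclidean_length([1, 2, 3, 4, 5]), 3) == 7.416               # sqrt(55)
assert round(euclidean_length([1, -1, 1, -1, 2, -3, 4, -5]), 3) == 7.616  # sqrt(58)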
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class A__ ( ctypes.Structure):
"""simple docstring"""
    # _fields_ (trailing underscore) is the attr ctypes expects on a Structure
snake_case__ : Optional[Any] =[('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def snake_case ( ) -> List[Any]:
if os.name == "nt":
lowerCamelCase : Tuple = CursorInfo()
lowerCamelCase : List[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCamelCase__ , ctypes.byref(UpperCamelCase__ ) )
lowerCamelCase : List[str] = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCamelCase__ , ctypes.byref(UpperCamelCase__ ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25l""" )
sys.stdout.flush()
def snake_case ( ) -> Union[str, Any]:
if os.name == "nt":
lowerCamelCase : List[str] = CursorInfo()
lowerCamelCase : List[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCamelCase__ , ctypes.byref(UpperCamelCase__ ) )
lowerCamelCase : List[str] = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCamelCase__ , ctypes.byref(UpperCamelCase__ ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25h""" )
sys.stdout.flush()
@contextmanager
def snake_case ( ) -> Tuple:
try:
hide_cursor()
yield
finally:
show_cursor()
| 222 |
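The hide/show pair wrapped in try/finally is the classic acquire-release shape; the same idea as a single POSIX-only context manager, using the ANSI escapes from the `os.name == "posix"` branch above:

import sys
from contextlib import contextmanager

@contextmanager
def ansi_hidden_cursor(stream=sys.stdout):
    stream.write("\033[?25l")  # hide cursor
    stream.flush()
    try:
        yield
    finally:
        stream.write("\033[?25h")  # always restore the cursor
        stream.flush()

with ansi_hidden_cursor():
    print("cursor hidden while this block runs")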
"""simple docstring"""
from __future__ import annotations
def snake_case ( UpperCamelCase__ : tuple[int, int] , UpperCamelCase__ : int ) -> list[tuple[int, int]]:
lowerCamelCase , lowerCamelCase : Optional[int] = position
lowerCamelCase : Any = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowerCamelCase : Optional[Any] = []
for position in positions:
lowerCamelCase , lowerCamelCase : Dict = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(UpperCamelCase__ )
return permissible_positions
def snake_case ( UpperCamelCase__ : list[list[int]] ) -> bool:
return not any(elem == 0 for row in board for elem in row )
def snake_case ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : tuple[int, int] , UpperCamelCase__ : int ) -> bool:
if is_complete(UpperCamelCase__ ):
return True
for position in get_valid_pos(UpperCamelCase__ , len(UpperCamelCase__ ) ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = position
if board[y][x] == 0:
lowerCamelCase : List[Any] = curr + 1
if open_knight_tour_helper(UpperCamelCase__ , UpperCamelCase__ , curr + 1 ):
return True
lowerCamelCase : int = 0
return False
def snake_case ( UpperCamelCase__ : int ) -> list[list[int]]:
lowerCamelCase : List[str] = [[0 for i in range(UpperCamelCase__ )] for j in range(UpperCamelCase__ )]
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
lowerCamelCase : Any = 1
if open_knight_tour_helper(UpperCamelCase__ , (i, j) , 1 ):
return board
lowerCamelCase : Optional[Any] = 0
    lowerCamelCase : List[Any] = F'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 222 | 1 |
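The solver tries every starting square and backtracks out of dead ends, so its runtime grows roughly exponentially in the board size. A small usage sketch, assuming the functions above are in scope (open tours exist at n = 5):

board = open_knight_tour(5)
for row in board:
    print(row)  # each square holds its 1..25 position in the visit order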
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_SCREAMING_SNAKE_CASE = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = True , ) -> Tuple:
_A = [file for file in os.listdir(lowerCAmelCase_ ) if os.path.isfile(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )]
if identifier is not None:
_A = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
for n_ in n_identifier:
_A = [file for file in files if n_ not in file]
else:
_A = [file for file in files if n_identifier not in file]
_A = ignore_files or []
ignore_files.append("""__init__.py""" )
_A = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , lowerCAmelCase_ )
if only_modules:
_A = file.split(""".""" )[0]
try:
_A = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
_A = doctest.DocTestSuite(lowerCAmelCase_ )
_A = unittest.TextTestRunner().run(lowerCAmelCase_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'''{module_identifier} is not a module.''' )
else:
_A = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def UpperCAmelCase ( self ) -> Any:
_A = Path("""src/transformers""" )
_A = """modeling"""
_A = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(lowerCAmelCase_ , identifier=lowerCAmelCase_ , ignore_files=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[Any]:
_A = Path("""src/transformers""" )
_A = """tokenization"""
self.analyze_directory(lowerCAmelCase_ , identifier=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = Path("""src/transformers""" )
_A = """configuration"""
self.analyze_directory(lowerCAmelCase_ , identifier=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = Path("""src/transformers""" )
_A = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(lowerCAmelCase_ , n_identifier=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = Path("""docs/source""" )
_A = ["""favicon.ico"""]
self.analyze_directory(lowerCAmelCase_ , ignore_files=lowerCAmelCase_ , only_modules=lowerCAmelCase_ )
| 83 |
import heapq
def snake_case ( snake_case__ :dict) -> set[int]:
_A = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(snake_case__ , [-1 * len(snake_case__), (key, value)])
# chosen_vertices = set of chosen vertices
_A = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_A = heapq.heappop(snake_case__)[1][0]
chosen_vertices.add(snake_case__)
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
_A = elem[1][1].index(snake_case__)
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(snake_case__)
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 83 | 1 |
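The heap bookkeeping above implements a plain greedy rule: repeatedly take the highest-degree remaining vertex and delete its incident arcs. The same rule without the heap, as a hypothetical reference version (the heap only makes the argmax cheaper; tie-breaking may differ):

def greedy_cover(graph):
    graph = {u: set(vs) for u, vs in graph.items()}
    cover = set()
    while any(graph.values()):                       # arcs remain
        u = max(graph, key=lambda n: len(graph[n]))  # highest remaining degree
        cover.add(u)
        for vs in graph.values():
            vs.discard(u)                            # drop arcs into u
        graph[u] = set()                             # drop arcs out of u
    return cover

print(greedy_cover({0: {1, 3}, 1: {0, 3}, 2: {0, 3, 4}, 3: {0, 1, 2}, 4: {2, 3}}))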
"""simple docstring"""
from collections import defaultdict
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> bool:
"""simple docstring"""
lowerCAmelCase_ : str = first_str.lower().strip()
lowerCAmelCase_ : List[Any] = second_str.lower().strip()
# Remove whitespace
lowerCAmelCase_ : List[Any] = first_str.replace(" " , "" )
lowerCAmelCase_ : Union[str, Any] = second_str.replace(" " , "" )
# Strings of different lengths are not anagrams
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
return False
# Default values for count should be 0
lowerCAmelCase_ : defaultdict[str, int] = defaultdict(__UpperCamelCase )
    # For each character position, increment the count for the first string's
    # character and decrement it for the second string's; anagrams net to zero
for i in range(len(__UpperCamelCase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
lowercase__ = input("""Enter the first string """).strip()
lowercase__ = input("""Enter the second string """).strip()
lowercase__ = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
| 610 |
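`collections.Counter` expresses the same multiset comparison in one line; a compact equivalent for comparison:

from collections import Counter

def check_anagrams_counter(a: str, b: str) -> bool:
    normalize = lambda s: s.lower().strip().replace(" ", "")
    return Counter(normalize(a)) == Counter(normalize(b))

assert check_anagrams_counter("Silent", "Listen")
assert not check_anagrams_counter("Silent", "Listens")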
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : Optional[int] = ["""image_processor""", """tokenizer"""]
a_ : Union[str, Any] = """ViltImageProcessor"""
a_ : Dict = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] , a_ : Optional[int]=None , a_ : Optional[Any]=None , **a_ : str ):
lowerCAmelCase_ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a_ , )
lowerCAmelCase_ : Tuple = kwargs.pop("feature_extractor" )
lowerCAmelCase_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a_ , a_ )
lowerCAmelCase_ : str = self.image_processor
def __call__( self : int , a_ : List[Any] , a_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a_ : bool = True , a_ : Union[bool, str, PaddingStrategy] = False , a_ : Union[bool, str, TruncationStrategy] = None , a_ : Optional[int] = None , a_ : int = 0 , a_ : Optional[int] = None , a_ : Optional[bool] = None , a_ : Optional[bool] = None , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = True , a_ : Optional[Union[str, TensorType]] = None , **a_ : Optional[Any] , ):
lowerCAmelCase_ : Dict = self.tokenizer(
text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , )
# add pixel_values + pixel_mask
lowerCAmelCase_ : Tuple = self.image_processor(a_ , return_tensors=a_ )
encoding.update(a_ )
return encoding
def lowerCamelCase ( self : Union[str, Any] , *a_ : Dict , **a_ : Union[str, Any] ):
return self.tokenizer.batch_decode(*a_ , **a_ )
def lowerCamelCase ( self : Optional[Any] , *a_ : List[str] , **a_ : Any ):
return self.tokenizer.decode(*a_ , **a_ )
@property
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : Tuple = self.tokenizer.model_input_names
lowerCAmelCase_ : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase ( self : Union[str, Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a_ , )
return self.image_processor_class
@property
def lowerCamelCase ( self : Optional[int] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a_ , )
return self.image_processor
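# Typical usage of such a processor pairs one image with one piece of text: the
# tokenizer contributes input_ids/attention_mask and the image processor adds
# pixel_values (plus pixel_mask). A hedged usage sketch; the checkpoint name
# below is an assumption for illustration, not taken from the snippet above.
import requests
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")  # assumed checkpoint
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
encoding = processor(image, "How many cats are there?", return_tensors="pt")
print(sorted(encoding.keys()))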
| 610 | 1 |
import os
from datetime import datetime as dt
from github import Github
_UpperCAmelCase = [
'good first issue',
'feature request',
'wip',
]
def lowerCAmelCase_ ( ) -> Optional[int]:
UpperCamelCase_ = Github(os.environ["GITHUB_TOKEN"] )
UpperCamelCase_ = g.get_repo("huggingface/accelerate" )
UpperCamelCase_ = repo.get_issues(state="open" )
for issue in open_issues:
UpperCamelCase_ = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
UpperCamelCase_ = comments[0] if len(UpperCamelCase_ ) > 0 else None
UpperCamelCase_ = dt.utcnow()
UpperCamelCase_ = (current_time - issue.updated_at).days
UpperCamelCase_ = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
lowerCAmelCase_()
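# The core of the bot is plain date arithmetic; a dependency-free sketch of the
# stale-comment branch, with thresholds copied from the code above.
from datetime import datetime, timedelta
from typing import Optional

def is_stale(updated_at: datetime, created_at: datetime, now: Optional[datetime] = None) -> bool:
    # Stale: more than 23 days since the last update and the issue is at least 30 days old.
    now = now or datetime.utcnow()
    return (now - updated_at).days > 23 and (now - created_at).days >= 30

now = datetime.utcnow()
assert is_stale(now - timedelta(days=30), now - timedelta(days=40), now)
assert not is_stale(now - timedelta(days=2), now - timedelta(days=40), now)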
| 371 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase_ ( ) -> Dict:
raise RuntimeError("CUDA out of memory." )
class _UpperCamelCase ( nn.Module ):
def __init__( self: int ) -> List[Any]:
"""simple docstring"""
super().__init__()
UpperCamelCase_ = nn.Linear(3 , 4 )
UpperCamelCase_ = nn.BatchNorm1d(4 )
UpperCamelCase_ = nn.Linear(4 , 5 )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(_SCREAMING_SNAKE_CASE ) ) )
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: List[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_SCREAMING_SNAKE_CASE: str ):
nonlocal batch_sizes
batch_sizes.append(_SCREAMING_SNAKE_CASE )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(_SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8] )
def lowercase ( self: Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any] ):
nonlocal batch_sizes
batch_sizes.append(_SCREAMING_SNAKE_CASE )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCamelCase_ , UpperCamelCase_ = mock_training_loop_function("hello" )
self.assertListEqual(_SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, "hello"] )
def lowercase ( self: Any ) -> Optional[int]:
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(_SCREAMING_SNAKE_CASE: Union[str, Any] ):
pass
with self.assertRaises(_SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def lowercase ( self: Dict ) -> Optional[int]:
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(_SCREAMING_SNAKE_CASE: Optional[int] ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(_SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def lowercase ( self: Optional[int] ) -> Dict:
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Tuple ):
if batch_size != 8:
raise_fake_out_of_memory()  # the helper itself raises RuntimeError
with self.assertRaises(_SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function(128 , "hello" , "world" )
self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] )
self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] )
def lowercase ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(_SCREAMING_SNAKE_CASE: List[Any] ):
raise ValueError("Oops, we had an error!" )
with self.assertRaises(_SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn("Oops, we had an error!" , cm.exception.args[0] )
@require_cuda
def lowercase ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = torch.cuda.memory_allocated()
UpperCamelCase_ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = release_memory(_SCREAMING_SNAKE_CASE )
self.assertEqual(torch.cuda.memory_allocated() , _SCREAMING_SNAKE_CASE )
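# Conceptually, find_executable_batch_size retries the wrapped function with a
# halved batch size whenever it raises an out-of-memory error. A simplified
# sketch of that control flow (not Accelerate's actual implementation):
import functools

def find_executable_batch_size_sketch(starting_batch_size: int = 128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "out of memory" not in str(e):
                        raise  # unrelated errors propagate unchanged
                    batch_size //= 2  # halve and retry
            raise RuntimeError("No executable batch size found, reached zero.")
        return wrapper
    return decorator

@find_executable_batch_size_sketch(starting_batch_size=128)
def train(batch_size):
    if batch_size > 8:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

assert train() == 8  # tried 128, 64, 32, 16, then succeeded at 8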
| 371 | 1 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]=10_00 ) -> Union[str, Any]:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
__lowerCamelCase : Any = n - 1
__lowerCamelCase : Tuple = 0
while d % 2 == 0:
d //= 2  # integer division: d must remain an int exponent
exp += 1
# n - 1 == d * 2**exp with d odd
__lowerCamelCase : Tuple = 0
while count < prec:
__lowerCamelCase : List[str] = random.randint(2 , n - 1 )
__lowerCamelCase : Tuple = bin_exp_mod(a , d , n )  # b = a**d mod n
if b != 1:
__lowerCamelCase : Optional[Any] = True
for _ in range(UpperCAmelCase_ ):
if b == n - 1:
__lowerCamelCase : Any = False
break
__lowerCamelCase : List[str] = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
A__ : Tuple = abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 13 |
from __future__ import annotations
from collections import deque
class _lowerCamelCase :
def __init__( self , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: list[dict]= []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(lowerCAmelCase )
self.set_fail_transitions()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: str= 0
for character in keyword:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE__: List[Any]= next_state
self.adlist[current_state]["output"].append(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> None:
SCREAMING_SNAKE_CASE__: deque= deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= 0
while q:
SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state''']
while (
self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Dict= self.find_next_state(
lowerCAmelCase , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
SCREAMING_SNAKE_CASE__: str= (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]:
SCREAMING_SNAKE_CASE__: dict= {} # maps each keyword to the list of its occurrence positions
SCREAMING_SNAKE_CASE__: Optional[Any]= 0
for i in range(len(lowerCAmelCase ) ):
while (
self.find_next_state(lowerCAmelCase , string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE__: List[Any]= 0
else:
SCREAMING_SNAKE_CASE__: Dict= next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE__: Optional[Any]= []
result[key].append(i - len(lowerCAmelCase ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
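# A correctness baseline for the automaton: the same {keyword: [positions]}
# output produced naively with str.find. Useful for testing, though it costs
# one scan of the text per keyword instead of the automaton's single pass.
def naive_multi_search(text: str, keywords: list[str]) -> dict[str, list[int]]:
    result: dict[str, list[int]] = {}
    for kw in keywords:
        pos = text.find(kw)
        while pos != -1:
            result.setdefault(kw, []).append(pos)
            pos = text.find(kw, pos + 1)  # step by one to allow overlapping matches
    return result

assert naive_multi_search("whatever, err ... , wherever", ["what", "hat", "ver", "er"]) == {
    "what": [0], "hat": [1], "ver": [5, 25], "er": [6, 10, 22, 26],
}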
| 64 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowerCAmelCase__ :List[str] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = 'sshleifer/tiny-gpt2'
lowerCAmelCase__ :List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
lowerCAmelCase__ :Tuple = PyTorchBenchmark(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = 'sgugger/tiny-distilbert-classification'
lowerCAmelCase__ :Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , only_pretrain_model=__UpperCAmelCase , )
lowerCAmelCase__ :List[Any] = PyTorchBenchmark(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = 'sshleifer/tiny-gpt2'
lowerCAmelCase__ :Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , torchscript=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
lowerCAmelCase__ :Dict = PyTorchBenchmark(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = 'sshleifer/tiny-gpt2'
lowerCAmelCase__ :str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , fpaa=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
lowerCAmelCase__ :Union[str, Any] = PyTorchBenchmark(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = 'sshleifer/tiny-gpt2'
lowerCAmelCase__ :Any = AutoConfig.from_pretrained(__UpperCAmelCase )
# set architectures equal to `None`
lowerCAmelCase__ :int = None
lowerCAmelCase__ :Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
lowerCAmelCase__ :List[str] = PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
lowerCAmelCase__ :Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = 'sshleifer/tiny-gpt2'
lowerCAmelCase__ :str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
lowerCAmelCase__ :str = PyTorchBenchmark(__UpperCAmelCase )
lowerCAmelCase__ :str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = 'sshleifer/tiny-gpt2'
lowerCAmelCase__ :Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
lowerCAmelCase__ :Optional[int] = PyTorchBenchmark(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = 'sshleifer/tiny-gpt2'
lowerCAmelCase__ :int = AutoConfig.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
lowerCAmelCase__ :Any = PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
lowerCAmelCase__ :Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = 'sshleifer/tinier_bart'
lowerCAmelCase__ :List[Any] = AutoConfig.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
lowerCAmelCase__ :List[Any] = PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
lowerCAmelCase__ :int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = 'sshleifer/tiny-gpt2'
lowerCAmelCase__ :Tuple = AutoConfig.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
lowerCAmelCase__ :int = PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
lowerCAmelCase__ :Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = 'sshleifer/tinier_bart'
lowerCAmelCase__ :Union[str, Any] = AutoConfig.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
lowerCAmelCase__ :str = PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
lowerCAmelCase__ :Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ :List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , save_to_csv=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCAmelCase , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(__UpperCAmelCase , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(__UpperCAmelCase , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(__UpperCAmelCase , 'train_time.csv' ) , env_info_csv_file=os.path.join(__UpperCAmelCase , 'env.csv' ) , multi_process=__UpperCAmelCase , )
lowerCAmelCase__ :Tuple = PyTorchBenchmark(__UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCAmelCase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , 'env.csv' ) ).exists() )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__UpperCAmelCase ):
self.assertTrue(hasattr(__UpperCAmelCase , 'sequential' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'cumulative' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'current' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ :Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCAmelCase , 'log.txt' ) , log_print=__UpperCAmelCase , trace_memory_line_by_line=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
lowerCAmelCase__ :Union[str, Any] = PyTorchBenchmark(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , 'log.txt' ) ).exists() )
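# Outside the test harness, the benchmark utility is driven the same way as in
# the tests above; a minimal inference-only run (note: these benchmark classes
# may emit a deprecation warning on recent transformers releases).
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],  # any hub model id
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = PyTorchBenchmark(args)
results = benchmark.run()
# time_inference_result / memory_inference_result are nested dicts keyed by
# model name, then batch size, then sequence length.
print(results.time_inference_result)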
| 709 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->bool:
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
lowerCAmelCase__ :int = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
lowerCAmelCase__ :List[Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_SCREAMING_SNAKE_CASE , 1 ):
if n < _p:
# then we have our last prime to check
lowerCAmelCase__ :Any = primes[:idx]
break
lowerCAmelCase__ , lowerCAmelCase__ :Dict = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCAmelCase__ :Optional[Any] = False
for r in range(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :List[str] = pow(_SCREAMING_SNAKE_CASE , d * 2**r , _SCREAMING_SNAKE_CASE )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCAmelCase__ :int = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def __A () ->None:
"""simple docstring"""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
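# The one arithmetic step that tends to trip readers up is the factorisation
# n - 1 = d * 2**s with d odd; a tiny worked check on the Carmichael number 561.
def decompose(n: int) -> tuple[int, int]:
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    return d, s

d, s = decompose(561)  # 560 == 35 * 2**4
assert (d, s) == (35, 4) and d % 2 == 1 and d * 2**s == 560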
| 560 | 0 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCAmelCase : str = logging.get_logger("""transformers.models.speecht5""")
def _A ( A ,A ,A ) -> int:
hf_model.apply_weight_norm()
lowercase : int = checkpoint['input_conv.weight_g']
lowercase : Optional[Any] = checkpoint['input_conv.weight_v']
lowercase : str = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
lowercase : Tuple = checkpoint[F'''upsamples.{i}.1.weight_g''']
lowercase : Tuple = checkpoint[F'''upsamples.{i}.1.weight_v''']
lowercase : Optional[Any] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowercase : Optional[Any] = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
lowercase : Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
lowercase : int = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
lowercase : List[str] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
lowercase : str = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
lowercase : Optional[Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
lowercase : List[str] = checkpoint['output_conv.1.weight_g']
lowercase : int = checkpoint['output_conv.1.weight_v']
lowercase : List[str] = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def _A ( A ,A ,A ,A=None ,A=None ,) -> str:
if config_path is not None:
lowercase : List[Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase : Optional[Any] = SpeechTaHifiGanConfig()
lowercase : Tuple = SpeechTaHifiGan(lowerCamelCase_ )
lowercase : Optional[int] = torch.load(lowerCamelCase_ )
load_weights(orig_checkpoint["model"]["generator"] ,lowerCamelCase_ ,lowerCamelCase_ )
lowercase : List[Any] = np.load(lowerCamelCase_ )
lowercase : Optional[Any] = stats[0].reshape(-1 )
lowercase : str = stats[1].reshape(-1 )
lowercase : Union[str, Any] = torch.from_numpy(lowerCamelCase_ ).float()
lowercase : Optional[Any] = torch.from_numpy(lowerCamelCase_ ).float()
model.save_pretrained(lowerCamelCase_ )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(lowerCamelCase_ )
if __name__ == "__main__":
lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCAmelCase : List[str] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
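# The weight_g/weight_v pairs exist because the generator's convolutions are
# weight-normalised: apply_weight_norm splits each weight into a magnitude
# (weight_g) and a direction (weight_v), and remove_weight_norm folds them back.
# A small, self-contained demonstration on a plain conv (illustrative only):
from torch import nn
from torch.nn.utils import remove_weight_norm, weight_norm

conv = weight_norm(nn.Conv1d(4, 8, kernel_size=3))
print([name for name, _ in conv.named_parameters()])  # e.g. ['bias', 'weight_g', 'weight_v']
remove_weight_norm(conv)  # recomputes weight = weight_g * weight_v / ||weight_v||
print([name for name, _ in conv.named_parameters()])  # e.g. ['bias', 'weight']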
| 372 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _UpperCamelCase( unittest.TestCase ):
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
__a : List[Any] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : int = None
ops.enable_eager_execution_internal()
__a : Optional[Any] = tf.config.list_physical_devices('CPU' )
if len(SCREAMING_SNAKE_CASE__ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a : int = tf.config.list_logical_devices(device_type='CPU' )
__a : str = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a : List[str] = GradientAccumulator()
__a : Tuple = tf.Variable([4.0, 3.0] )
__a , __a : int = create_optimizer(5e-5 , 1_0 , 5 )
__a : List[Any] = tf.Variable([0.0, 0.0] , trainable=SCREAMING_SNAKE_CASE__ )
def accumulate_on_replica(SCREAMING_SNAKE_CASE__ : Optional[Any] ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
with strategy.scope():
__a : Optional[Any] = strategy.experimental_local_results(SCREAMING_SNAKE_CASE__ )
local_variables[0].assign(SCREAMING_SNAKE_CASE__ )
local_variables[1].assign(SCREAMING_SNAKE_CASE__ )
strategy.run(SCREAMING_SNAKE_CASE__ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(SCREAMING_SNAKE_CASE__ )
def _check_local_values(SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ):
__a : Union[str, Any] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , SCREAMING_SNAKE_CASE__ , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , SCREAMING_SNAKE_CASE__ , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
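# The behaviour under test is ordinary gradient accumulation: sum gradients
# over several micro-batches, then apply the optimiser once. The same logic in
# eager PyTorch, without any distribution strategy (illustrative):
import torch
from torch import nn

model = nn.Linear(2, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accumulation_steps = 4

optimizer.zero_grad()
for step in range(8):
    x = torch.randn(16, 2)
    loss = model(x).pow(2).mean() / accumulation_steps  # scale so the sum is an average
    loss.backward()  # gradients accumulate in param.grad across iterations
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()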
| 47 | 0 |
'''simple docstring'''
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =hex_num.strip()
if not hex_num:
raise ValueError("No value was passed to the function" )
a_ =hex_num[0] == "-"
if is_negative:
a_ =hex_num[1:]
try:
a_ =int(lowercase__ , 1_6 )
except ValueError:
raise ValueError("Invalid value was passed to the function" )
a_ =""
while int_num > 0:
a_ =str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("-" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
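# For comparison: the standard library covers the conversion once the sign is
# handled, mirroring the function's "binary digits as a decimal int" output.
def hex_to_bin_sketch(hex_num: str) -> int:
    value = int(hex_num.strip(), 16)  # raises ValueError on bad input
    sign = "-" if value < 0 else ""
    return int(sign + format(abs(value), "b"))

assert hex_to_bin_sketch("AC") == 10101100
assert hex_to_bin_sketch("-fFfF") == -1111111111111111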
| 41 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = {
'''b0''': efficientnet.EfficientNetB0,
'''b1''': efficientnet.EfficientNetB1,
'''b2''': efficientnet.EfficientNetB2,
'''b3''': efficientnet.EfficientNetB3,
'''b4''': efficientnet.EfficientNetB4,
'''b5''': efficientnet.EfficientNetB5,
'''b6''': efficientnet.EfficientNetB6,
'''b7''': efficientnet.EfficientNetB7,
}
lowercase = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =EfficientNetConfig()
a_ =CONFIG_MAP[model_name]["hidden_dim"]
a_ =CONFIG_MAP[model_name]["width_coef"]
a_ =CONFIG_MAP[model_name]["depth_coef"]
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =CONFIG_MAP[model_name]["dropout_rate"]
a_ =CONFIG_MAP[model_name]["dw_padding"]
a_ ="huggingface/label-files"
a_ ="imagenet-1k-id2label.json"
a_ =1_0_0_0
a_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
a_ ={int(lowercase__ ): v for k, v in idalabel.items()}
a_ =idalabel
a_ ={v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ ="http://images.cocodataset.org/val2017/000000039769.jpg"
a_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase__ , )
return preprocessor
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =[v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
a_ =sorted(set(lowercase__ ) )
a_ =len(lowercase__ )
a_ ={b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )}
a_ =[]
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
a_ =block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
a_ ={}
for item in rename_keys:
if item[0] in original_param_names:
a_ ="efficientnet." + item[1]
a_ ="classifier.weight"
a_ ="classifier.bias"
return key_mapping
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
a_ =key_mapping[key]
if "_conv" in key and "kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
a_ =torch.from_numpy(np.transpose(lowercase__ ) )
else:
a_ =torch.from_numpy(lowercase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase__ )
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =model_classes[model_name](
include_top=lowercase__ , weights="imagenet" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1_0_0_0 , classifier_activation="softmax" , )
a_ =original_model.trainable_variables
a_ =original_model.non_trainable_variables
a_ ={param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
a_ =param.numpy()
a_ =list(tf_params.keys() )
# Load HuggingFace model
a_ =get_efficientnet_config(lowercase__ )
a_ =EfficientNetForImageClassification(lowercase__ ).eval()
a_ =hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
a_ =rename_keys(lowercase__ )
replace_params(lowercase__ , lowercase__ , lowercase__ )
# Initialize preprocessor and preprocess input image
a_ =convert_image_processor(lowercase__ )
a_ =preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
a_ =hf_model(**lowercase__ )
a_ =outputs.logits.detach().numpy()
# Original model inference
a_ =False
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
a_ =image.img_to_array(lowercase__ )
a_ =np.expand_dims(lowercase__ , axis=0 )
a_ =original_model.predict(lowercase__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase__ ):
os.mkdir(lowercase__ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase__ )
preprocessor.save_pretrained(lowercase__ )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
a_ =F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(lowercase__ )
hf_model.push_to_hub(lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowercase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
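# The whole conversion reduces to a rename table applied tensor-by-tensor,
# plus a layout permute for convolution kernels (TF stores HWIO, PyTorch wants
# OIHW; depthwise kernels need a different permute, omitted here). A stripped
# sketch of that pattern with hypothetical names:
import numpy as np
import torch

def convert_state_dict_sketch(tf_params: dict, key_mapping: dict) -> dict:
    hf_state = {}
    for tf_key, value in tf_params.items():
        hf_key = key_mapping[tf_key]
        if "kernel" in tf_key:
            hf_state[hf_key] = torch.from_numpy(value).permute(3, 2, 0, 1)  # HWIO -> OIHW
        else:
            hf_state[hf_key] = torch.from_numpy(value)
    return hf_state

fake = {"stem_conv/kernel:0": np.zeros((3, 3, 3, 32), dtype=np.float32)}
mapping = {"stem_conv/kernel:0": "embeddings.convolution.weight"}
print(convert_state_dict_sketch(fake, mapping)["embeddings.convolution.weight"].shape)  # (32, 3, 3, 3)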
| 41 | 1 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
SCREAMING_SNAKE_CASE__ : List[str] = """sshleifer/mar_enro_6_3_student"""
class UpperCamelCase__ (__UpperCAmelCase ):
'''simple docstring'''
def _lowercase ( self ) -> List[str]:
super().setUp()
lowerCamelCase : List[str] = cached_path(
"https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=UpperCamelCase__ , )
lowerCamelCase : Tuple = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def _lowercase ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(UpperCamelCase__ )
@slow
@require_torch_gpu
def _lowercase ( self ) -> Any:
lowerCamelCase : List[str] = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
lowerCamelCase : Optional[Any] = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split("finetune.py" )[1].strip()
lowerCamelCase : Dict = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
for k, v in env_vars_to_replace.items():
lowerCamelCase : Optional[int] = bash_script.replace(UpperCamelCase__ , str(UpperCamelCase__ ) )
lowerCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowerCamelCase : List[Any] = F'''\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowerCamelCase : Any = ['finetune.py'] + bash_script.split() + args
with patch.object(UpperCamelCase__ , "argv" , UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
lowerCamelCase : Tuple = pl.Trainer.add_argparse_args(UpperCamelCase__ )
lowerCamelCase : int = SummarizationModule.add_model_specific_args(UpperCamelCase__ , os.getcwd() )
lowerCamelCase : Any = parser.parse_args()
lowerCamelCase : int = main(UpperCamelCase__ )
# Check metrics
lowerCamelCase : Union[str, Any] = load_json(model.metrics_save_path )
lowerCamelCase : int = metrics['val'][0]
lowerCamelCase : Union[str, Any] = metrics['val'][-1]
self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , UpperCamelCase__ )
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowerCamelCase : Tuple = os.listdir(UpperCamelCase__ )
lowerCamelCase : Dict = [x for x in contents if x.endswith(".ckpt" )][0]
lowerCamelCase : Any = os.path.join(args.output_dir , UpperCamelCase__ )
lowerCamelCase : Dict = torch.load(UpperCamelCase__ , map_location="cpu" )
lowerCamelCase : List[str] = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowerCamelCase : int = {os.path.basename(UpperCamelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
class UpperCamelCase__ (__UpperCAmelCase ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def _lowercase ( self ) -> Tuple:
lowerCamelCase : Any = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
lowerCamelCase : int = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
lowerCamelCase : List[Any] = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split("distillation.py" )[1].strip()
)
lowerCamelCase : str = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
lowerCamelCase : int = bash_script.replace("--fp16 " , " " )
for k, v in env_vars_to_replace.items():
lowerCamelCase : Dict = bash_script.replace(UpperCamelCase__ , str(UpperCamelCase__ ) )
lowerCamelCase : Tuple = self.get_auto_remove_tmp_dir()
lowerCamelCase : List[str] = bash_script.replace("--fp16" , "" )
lowerCamelCase : List[str] = 6
lowerCamelCase : Optional[Any] = (
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(UpperCamelCase__ , "argv" , UpperCamelCase__ ):
lowerCamelCase : Optional[int] = argparse.ArgumentParser()
lowerCamelCase : Union[str, Any] = pl.Trainer.add_argparse_args(UpperCamelCase__ )
lowerCamelCase : Dict = SummarizationDistiller.add_model_specific_args(UpperCamelCase__ , os.getcwd() )
lowerCamelCase : Optional[Any] = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowerCamelCase : List[str] = distill_main(UpperCamelCase__ )
# Check metrics
lowerCamelCase : List[str] = load_json(model.metrics_save_path )
lowerCamelCase : str = metrics['val'][0]
lowerCamelCase : Optional[int] = metrics['val'][-1]
assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , UpperCamelCase__ )
# check lightning ckpt can be loaded and has a reasonable statedict
lowerCamelCase : Union[str, Any] = os.listdir(UpperCamelCase__ )
lowerCamelCase : List[str] = [x for x in contents if x.endswith(".ckpt" )][0]
lowerCamelCase : Tuple = os.path.join(args.output_dir , UpperCamelCase__ )
lowerCamelCase : Optional[Any] = torch.load(UpperCamelCase__ , map_location="cpu" )
lowerCamelCase : Tuple = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowerCamelCase : str = {os.path.basename(UpperCamelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
| 311 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class A__ :
"""simple docstring"""
__A : Optional[int] = None
__A : Optional[jnp.ndarray] = None
__A : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def __lowercase ( cls) -> Union[str, Any]:
'''simple docstring'''
return cls()
@dataclass
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : jnp.ndarray
__A : jnp.ndarray
__A : KarrasVeSchedulerState
class A__ ( __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
@property
def __lowercase ( self) -> str:
'''simple docstring'''
return True
@register_to_config
def __init__( self , lowercase = 0.02 , lowercase = 100 , lowercase = 1.0_07 , lowercase = 80 , lowercase = 0.05 , lowercase = 50 , ) -> List[Any]:
'''simple docstring'''
pass
def __lowercase ( self) -> str:
'''simple docstring'''
return KarrasVeSchedulerState.create()
def __lowercase ( self , lowercase , lowercase , lowercase = ()) -> KarrasVeSchedulerState:
'''simple docstring'''
a__ : Any = jnp.arange(0 , lowercase)[::-1].copy()
a__ : Any = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=lowercase , schedule=jnp.array(lowercase , dtype=jnp.floataa) , timesteps=lowercase , )
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , ) -> Tuple[jnp.ndarray, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
a__ : List[str] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1)
else:
a__ : str = 0
# sample eps ~ N(0, S_noise^2 * I)
a__ : Optional[Any] = random.split(lowercase , num=1)
a__ : Optional[Any] = self.config.s_noise * random.normal(key=lowercase , shape=sample.shape)
a__ : str = sigma + gamma * sigma
a__ : int = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
'''simple docstring'''
a__ : Union[str, Any] = sample_hat + sigma_hat * model_output
a__ : Tuple = (sample_hat - pred_original_sample) / sigma_hat
a__ : Dict = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowercase , derivative=lowercase , state=lowercase)
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
'''simple docstring'''
a__ : Optional[int] = sample_prev + sigma_prev * model_output
a__ : Union[str, Any] = (sample_prev - pred_original_sample) / sigma_prev
a__ : List[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowercase , derivative=lowercase , state=lowercase)
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
raise NotImplementedError()
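# The schedule built in set_timesteps is a log-linear (geometric) interpolation
# from sigma_max**2 down to sigma_min**2; the same numbers in plain NumPy,
# using the first two config defaults above (sigma_min=0.02, sigma_max=100).
import numpy as np

sigma_min, sigma_max, n = 0.02, 100.0, 50
i = np.arange(n)
schedule = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (n - 1))

assert np.isclose(schedule[0], sigma_max**2)
assert np.isclose(schedule[-1], sigma_min**2)
# Consecutive entries differ by a constant ratio, i.e. the log is linear.
assert np.allclose(np.diff(np.log(schedule)), np.log(schedule[1] / schedule[0]))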
| 302 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
UpperCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
UpperCamelCase = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
UpperCamelCase = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class UpperCamelCase__ ( snake_case__ ):
"""simple docstring"""
A__ : Tuple = VOCAB_FILES_NAMES
A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A__ : List[Any] = PRETRAINED_INIT_CONFIGURATION
A__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : List[Any] = LxmertTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="[UNK]" , SCREAMING_SNAKE_CASE__="[SEP]" , SCREAMING_SNAKE_CASE__="[PAD]" , SCREAMING_SNAKE_CASE__="[CLS]" , SCREAMING_SNAKE_CASE__="[MASK]" , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> Optional[int]:
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , tokenize_chinese_chars=lowercase_ , strip_accents=lowercase_ , **lowercase_ , )
A__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowercase_ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowercase_ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowercase_ ) != tokenize_chinese_chars
):
A__ = getattr(lowercase_ , normalizer_state.pop("type" ) )
A__ = do_lower_case
A__ = strip_accents
A__ = tokenize_chinese_chars
A__ = normalizer_class(**lowercase_ )
A__ = do_lower_case
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> List[Any]:
A__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> int:
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Any:
A__ = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
| 704 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=sys.maxsize ) -> str:
A__ = "bilinear"
A__ = max_size
A__ = short_edge_length
def __call__( self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
A__ = []
for img in imgs:
A__ , A__ = img.shape[:2]
# later: provide list and randomly choose index for resize
A__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
A__ = size * 1.0 / min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if h < w:
A__ , A__ = size, scale * w
else:
A__ , A__ = scale * h, size
if max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) > self.max_size:
A__ = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = newh * scale
A__ = neww * scale
A__ = int(neww + 0.5 )
A__ = int(newh + 0.5 )
if img.dtype == np.uint8:
A__ = Image.fromarray(SCREAMING_SNAKE_CASE__ )
A__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
A__ = np.asarray(SCREAMING_SNAKE_CASE__ )
else:
A__ = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # hwc -> nchw
A__ = nn.functional.interpolate(
SCREAMING_SNAKE_CASE__ , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE__ ).squeeze(0 )
img_augs.append(SCREAMING_SNAKE_CASE__ )
return img_augs
class Preprocess:
    """simple docstring"""

    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    """simple docstring"""
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    """simple docstring"""
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
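# Hedged usage sketch for ResizeShortestEdge (illustrative sizes, not from the
# original file): a 480x640 uint8 image is rescaled so its short edge becomes
# 800 pixels while the long edge stays under max_size.
if __name__ == "__main__":
    demo_resize = ResizeShortestEdge([800, 800], max_size=1333)
    demo_image = np.zeros((480, 640, 3), dtype=np.uint8)
    print(demo_resize([demo_image])[0].shape)  # (800, 1067, 3)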
| 562 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
A_ : Tuple = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
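# Illustration (hedged): on a plain machine, where SM_HP_MP_PARAMETERS is unset
# and `smdistributed` is not installed, the probe above returns False.
#   >>> is_sagemaker_model_parallel_available()
#   False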
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    """simple docstring"""

    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False | 57 |
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
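# Hedged verification sketch for the one-liner above: its stdout is a fixed
# point -- executing the printed text prints the same text again (CPython
# assumed; io/contextlib are only used to capture stdout for the check).
import contextlib
import io

source = "print((lambda quine: quine %% quine)(%r))" % "print((lambda quine: quine %% quine)(%r))"
buffer = io.StringIO()
with contextlib.redirect_stdout(buffer):
    exec(source)  # run the printed program once more
assert buffer.getvalue().strip() == source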
| 47 | 0 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """simple docstring"""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    """simple docstring"""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(F'\nSearching Open Library for ISBN: {isbn}...\n')
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print("\n".join(F'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'Sorry, there are no results for ISBN: {isbn}.')
| 715 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """simple docstring"""

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
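# Hedged usage sketch for the Polynomial class above (coefficients are stored
# in ascending order of degree, so [1, 2, 3] means 1 + 2x + 3x^2):
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])
    q = Polynomial(1, [1, 1])
    print(p + q)           # 3x^2 + 3x + 2
    print(p.evaluate(2))   # 1 + 2*2 + 3*4 = 17
    print(p.derivative())  # 6x + 2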
| 297 | 0 |
def decimal_to_binary(num: int) -> str:
    """simple docstring"""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 625 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
def __UpperCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def __UpperCAmelCase ( self ) -> List[Any]:
pass
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase__ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_, nn.Linear ) )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ =model_class(A_ )
UpperCAmelCase__ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ =[*signature.parameters.keys()]
UpperCAmelCase__ =["pixel_values"]
self.assertListEqual(arg_names[:1], A_ )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*A_ )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
def __UpperCAmelCase ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ =True
if model_class in get_values(A_ ):
continue
UpperCAmelCase__ =model_class(A_ )
model.to(A_ )
model.train()
UpperCAmelCase__ =self._prepare_for_class(A_, A_, return_labels=A_ )
UpperCAmelCase__ =model(**A_ ).loss
loss.backward()
def __UpperCAmelCase ( self ) -> List[Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ =False
UpperCAmelCase__ =True
if model_class in get_values(A_ ) or not model_class.supports_gradient_checkpointing:
continue
UpperCAmelCase__ =model_class(A_ )
model.to(A_ )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase__ =self._prepare_for_class(A_, A_, return_labels=A_ )
UpperCAmelCase__ =model(**A_ ).loss
loss.backward()
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ =_config_zero_init(A_ )
for model_class in self.all_model_classes:
UpperCAmelCase__ =model_class(config=A_ )
# Skip the check for the backbone
UpperCAmelCase__ =[]
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCAmelCase__ =[f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"""Parameter {name} of model {model_class} seems not properly initialized""", )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCAmelCase ( self ) -> List[Any]:
pass
@slow
def __UpperCAmelCase ( self ) -> Optional[int]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCAmelCase__ =DPTModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def __UpperCAmelCase ( self ) -> Any:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ ="add"
with self.assertRaises(A_ ):
UpperCAmelCase__ =DPTForDepthEstimation(A_ )
def _UpperCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase__ =DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
UpperCAmelCase__ =DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(A_ )
UpperCAmelCase__ =prepare_img()
UpperCAmelCase__ =image_processor(images=A_, return_tensors="pt" ).to(A_ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ =model(**A_ )
UpperCAmelCase__ =outputs.predicted_depth
# verify the predicted depth
UpperCAmelCase__ =torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape, A_ )
UpperCAmelCase__ =torch.tensor(
[[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(A_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, A_, atol=1E-4 ) )
| 625 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 706 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ : Optional[int] = logging.get_logger(__name__)
lowercase__ : List[str] = {'''vocab_file''': '''spiece.model'''}
lowercase__ : Optional[int] = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
lowercase__ : List[str] = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        '''simple docstring'''
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        '''simple docstring'''
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] | 338 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""GPTSw3Tokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 92 |
import math
def prime_sieve(n):
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
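# Quick sanity check for the sieve above (hedged, easy to verify by hand):
#   prime_sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]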
def solution(limit=999_966_663_333):
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
| 84 | 0 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
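# Typical invocation (hedged; the script and file names here are placeholders):
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json --out-image-dir plots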
| 714 |
'''simple docstring'''
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
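# Hedged check: every implementation above should reproduce `expect` for the
# `arr` defined at the top of this file:
#   assert next_greatest_element_slow(arr) == expect
#   assert next_greatest_element_fast(arr) == expect
#   assert next_greatest_element(arr) == expect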
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 273 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"

        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 41 |
def method_1(boundary, steps):
    '''simple docstring'''
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    '''simple docstring'''
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    '''simple docstring'''
    y = (x - 0) * (x - 0)
    return y


def main():
    '''simple docstring'''
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"""y = {y}""")


if __name__ == "__main__":
    main()
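# Hedged sanity check: for f(x) = x^2 on [0, 1] the exact integral is 1/3; with
# 10 steps the rule above lands near 0.335 (the float stepping in make_points
# determines exactly which interior points get sampled):
#   assert abs(method_1([0.0, 1.0], 10.0) - 1 / 3) < 2e-3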
| 484 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCamelCase = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)

def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check that we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map

def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model

def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the ``nn.Linear`` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced

def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names

def has_4bit_bnb_layers(model):
    # Check whether the model contains any `bnb.nn.Linear4bit` layers
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device

def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
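

# Editor-added usage sketch (not part of the original module): how the helpers
# above are typically driven end to end. `MyModel`/`MyConfig` are hypothetical
# placeholders; the rest are the public accelerate entry points.
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     with init_empty_weights():
#         model = MyModel(MyConfig())  # built on the meta device, no real weights yet
#     model = load_and_quantize_model(
#         model,
#         bnb_quantization_config=bnb_config,
#         weights_location="path/to/checkpoint_folder",
#         device_map="auto",
#     )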
| 515 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 515 | 1 |
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient function phi(n) for 2 <= n <= limit, using a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
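

# Editor-added sanity check (not in the original file): the totients of 2..10 are
# 1, 2, 2, 4, 2, 6, 4, 6, 4, so solution(10) returns their sum, 31.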
if __name__ == "__main__":
print(F'{solution() = }')
| 343 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 343 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192,
        d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0,
        num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0,
        layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0,
        router_dtype="float32", router_ignore_padding_tokens=False,
        output_hidden_states=False, output_attentions=False, initializer_factor=0.002,
        output_router_logits=False, use_cache=True,
        separator_token_id=35998, pad_token_id=35995, eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
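

# Editor-added illustration (not part of the original file): the `attribute_map`
# above aliases generic names onto GPTSAN-specific ones, e.g.
#
#     config = GPTSanJapaneseConfig(d_model=512)
#     config.hidden_size  # -> 512, resolved through attribute_map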
| 711 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single PIL image input for the processor tests."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
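
    # Editor-added note (not in the original test file): each test below builds a
    # processor from the temporary tokenizer/image-processor files, roughly:
    #
    #     processor = MgpstrProcessor(
    #         tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
    #     )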
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
| 360 |
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Calculate beta = velocity / speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the 4x4 Lorentz boost matrix along the x axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the boost to a four-vector; a symbolic event is used when none is given."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
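

# Editor-added worked example (approximate values, not in the original file): at
# half the speed of light, beta(c / 2) == 0.5 and gamma(c / 2) == 1 / sqrt(0.75),
# about 1.1547, i.e. time dilates by roughly 15.5 %.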
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
| 360 | 1 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Fallback stub so the references below do not fail when PIL is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
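

# Editor-added usage sketch (not part of the original test file): outside the test
# suite the same pipeline is driven directly, e.g.
#
#     vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#     vqa(image="cats.png", question="How many cats are there?", top_k=2)
#
# where "cats.png" is a placeholder path to any local image.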
| 576 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in the feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that dynamically pads the received audio inputs and labels.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels
        return batch
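

# Editor-added illustration (not in the original script): the collator is what lets
# `Trainer` batch variable-length audio. Roughly, inside `main()` below:
#
#     data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
#     batch = data_collator([train_dataset[0], train_dataset[1]])
#     # -> padded "input_values" tensors plus "labels" with -100 on padding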
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform a training step on a batch of inputs."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
return results
if __name__ == "__main__":
main()
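

# Editor-added example invocation (hypothetical file name and values, apart from the
# dataclass fields defined above):
#
#     python run_common_voice.py \
#         --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#         --dataset_config_name tr \
#         --output_dir ./wav2vec2-common-voice \
#         --do_train --do_eval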
| 576 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 243 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, else False."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the `nth` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
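

# Editor-added sanity check (not in the original file): prime_generator() yields
# 2, 3, 5, 7, 11, 13, ..., so solution(6) == 13.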
if __name__ == "__main__":
print(F'''{solution() = }''')
| 243 | 1 |
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace formula: the speed of sound in a fluid is
    sqrt(bulk_modulus / density), with the bulk modulus in Pa and the density
    in kg/m^3; the result is in m/s.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
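

# Editor-added worked example (approximate handbook values, not in the original
# file): water has density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa, so
# speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) is roughly 1468 m/s.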
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 386 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100,
        encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8,
        decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0,
        auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50",
        use_pretrained_backbone=True, dilation=False,
        class_cost=1, bbox_cost=5, giou_cost=2,
        mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5,
        giou_loss_coefficient=2, eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
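

# Editor-added illustration (not part of the original file): typical usage,
#
#     config = DetrConfig(num_queries=50)
#     config.hidden_size == config.d_model  # True, via attribute_map
#     config.to_dict()  # serializable dict including "model_type": "detr"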
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n"

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init of diffusers and extract the backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for a dummy object of the given name guarded by the given backend."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
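

# Editor-added illustration (not in the original script): for a class name the
# generated stub starts like
#
#     create_dummy_object("UNet2DModel", '["torch"]')
#     # -> '\nclass UNet2DModel(metaclass=DummyObject):\n    _backends = ["torch"]\n...'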
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
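# Usage: `python utils/check_dummies.py` to verify, or add `--fix_and_overwrite` to
# regenerate the dummy files (the script path is the conventional repository location).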
| 61 |
'''simple docstring'''
# NOTE: the three constants below shared one name in the source; INSTALL_CONTENT is pinned
# by the reference below, the other two names follow the usual docs `_config.py` convention.
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 502 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
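# Illustrative check (not part of the original script):
#   accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 1])) -> 1
# argmax per row gives [1, 0], so exactly one prediction matches the labels.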
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label).

    The indexed tensor writes below are restored from the standard ROCStories preprocessing:
    each example yields two "story + continuation" sequences.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
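# Per dataset the returned tuple holds: input_ids / lm_labels of shape (n_batch, 2, input_len),
# mc_token_ids of shape (n_batch, 2) and mc_labels of shape (n_batch,) — one row per story
# with its two candidate continuations.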
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('''Encoding dataset...''' )
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
# Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                # the unpack below follows the legacy double-heads output layout
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
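# Example invocation (the CSV paths are placeholders for the ROCStories cloze splits):
# python run_openai_gpt.py --do_train --do_eval --output_dir ./gpt_finetuned \
#     --train_dataset ./cloze_test_val__spring2016.csv --eval_dataset ./cloze_test_test__spring2016.csv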
| 718 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    # the two test methods below carried identical names in the source, which would
    # silently shadow the first test; distinct names are restored here.
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
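# Run with pytest, e.g.: `python -m pytest -sv ./test_minhash_deduplication.py`
# (the test file name here is a placeholder).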
| 431 | 0 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n          Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n          Kern, Robert and Larson, Eric and Carey, C J and\n          Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n          Harris, Charles R. and Archibald, Anne M. and\n          Ribeiro, Antonio H. and Pedregosa, Fabian and\n          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n          Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # pearsonr is symmetric in its two arguments, so the order does not affect the result
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 245 |
'''simple docstring'''
def binary_xor(a: int, b: int) -> str:
    """
    Return the binary XOR of two non-negative integers as a "0b"-prefixed string.

    >>> binary_xor(25, 32)
    '0b111001'
    >>> binary_xor(37, 50)
    '0b010111'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 245 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    # assign_first_stage / with_box_refine / two_stage restored to True per the DETA setup
    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    # list of (original_key, hf_key) pairs to rename
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict;
            # the target key names below follow the rename scheme used in create_rename_keys
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict;
        # target key names follow the decoder rename scheme above
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
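# Shape illustration (hypothetical hidden_size of 256): in_proj_weight is (3 * 256, 256);
# rows [0, 256) feed q_proj, rows [256, 512) feed k_proj and rows [512, 768) feed v_proj.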
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes (the replacement targets are restored from the key names above)
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
A : Dict = torch.tensor(
[[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] )
A : List[Any] = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] )
elif model_name == "deta-swin-large-o365":
A : Dict = torch.tensor(
[[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]] )
A : Union[str, Any] = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(snake_case__ ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(snake_case__ ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(F'jozhang97/{model_name}' )
processor.push_to_hub(F'jozhang97/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
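# Example invocation (script and folder names are placeholders):
# python convert_deta_swin_to_pytorch.py --model_name deta-swin-large --pytorch_dump_folder_path ./deta-swin-large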
| 343 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Field names are restored from their `data_args.<name>` use sites in main() below.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
# create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
A : str = image_processor.size['''shortest_edge''']
else:
A : List[Any] = (image_processor.size['''height'''], image_processor.size['''width'''])
A : List[Any] = Compose(
[
Lambda(lambda snake_case__ : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(snake_case__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(snake_case__ ):
A : str = [transforms(snake_case__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
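# Example invocation (output directory is a placeholder):
# python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo --do_train --do_eval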
| 343 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Output type carrying the projected text embeddings alongside the base model outputs."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            ) | 498 |
from ...configuration_utils import PretrainedConfig  # as in transformers' nezha config module

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache | 327 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]
def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type" , [str, list, dict] )
def UpperCAmelCase__ ( A__ , A__ , A__ ) -> List[str]:
"""simple docstring"""
import requests
monkeypatch.setattr(A__ , "request" , A__ )
lowerCamelCase__ = URL
if issubclass(A__ , A__ ):
lowerCamelCase__ = url
elif issubclass(A__ , A__ ):
lowerCamelCase__ = [url]
elif issubclass(A__ , A__ ):
lowerCamelCase__ = {"train": url}
lowerCamelCase__ = "dummy"
lowerCamelCase__ = "downloads"
lowerCamelCase__ = tmp_path
lowerCamelCase__ = DownloadConfig(
cache_dir=os.path.join(A__ , A__ ) , use_etag=A__ , )
lowerCamelCase__ = DownloadManager(dataset_name=A__ , download_config=A__ )
lowerCamelCase__ = dl_manager.download(A__ )
lowerCamelCase__ = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(A__ , A__ ):
lowerCamelCase__ = [downloaded_paths]
lowerCamelCase__ = [urls]
elif isinstance(A__ , A__ ):
assert "train" in downloaded_paths.keys()
lowerCamelCase__ = downloaded_paths.values()
lowerCamelCase__ = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(A__ , A__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowerCamelCase__ = Path(A__ )
lowerCamelCase__ = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowerCamelCase__ = downloaded_path.read_text()
assert content == CONTENT
lowerCamelCase__ = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
lowerCamelCase__ = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def UpperCAmelCase__ ( A__ , A__ , A__ ) -> Dict:
"""simple docstring"""
lowerCamelCase__ = str(A__ )
if issubclass(A__ , A__ ):
lowerCamelCase__ = filename
elif issubclass(A__ , A__ ):
lowerCamelCase__ = [filename]
elif issubclass(A__ , A__ ):
lowerCamelCase__ = {"train": filename}
lowerCamelCase__ = "dummy"
lowerCamelCase__ = xz_file.parent
lowerCamelCase__ = "extracted"
lowerCamelCase__ = DownloadConfig(
cache_dir=A__ , use_etag=A__ , )
lowerCamelCase__ = DownloadManager(dataset_name=A__ , download_config=A__ )
lowerCamelCase__ = dl_manager.extract(A__ )
lowerCamelCase__ = paths
for extracted_paths in [extracted_paths]:
if isinstance(A__ , A__ ):
lowerCamelCase__ = [extracted_paths]
lowerCamelCase__ = [paths]
elif isinstance(A__ , A__ ):
assert "train" in extracted_paths.keys()
lowerCamelCase__ = extracted_paths.values()
lowerCamelCase__ = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(A__ , A__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowerCamelCase__ = Path(A__ )
lowerCamelCase__ = extracted_path.parts
assert parts[-1] == hash_url_to_filename(A__ , etag=A__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowerCamelCase__ = extracted_path.read_text()
lowerCamelCase__ = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 274 |
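# --- Usage sketch of the DownloadManager API exercised by the tests above.
# The URL is a placeholder; inside a real `datasets` loading script the
# manager instance is supplied by the library.
from datasets.download.download_manager import DownloadManager

def _download_manager_sketch(dl_manager: DownloadManager) -> None:
    # download() caches the file under a hash of its URL (the layout the tests
    # assert); iter_archive() then streams (path, file-like) pairs from a tar/zip.
    archive_path = dl_manager.download("https://example.com/data.tar.gz")
    for inner_path, file_obj in dl_manager.iter_archive(archive_path):
        print(inner_path, file_obj.readline()[:20])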
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 274 | 1 |
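# --- Usage sketch: composing an encoder-decoder config from two sub-configs.
# BertConfig is used purely as an illustrative encoder/decoder pair.
from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig()
decoder_config = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention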
def solution(n: int = 1000) -> int:
    """
    Return the product a * b * c of the Pythagorean triplet (a, b, c)
    with a + b + c == n (Project Euler problem 9), or -1 if none exists.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
| 638 |
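# --- Worked check for the function above: for n = 12 the only Pythagorean
# triplet is (3, 4, 5), since 3 + 4 + 5 = 12 and 3**2 + 4**2 = 5**2,
# so solution(12) == 3 * 4 * 5 == 60.
assert solution(12) == 60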
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
| 579 | 0 |
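# --- Worked check: the count of monotonic paths through an n x n grid is the
# central binomial coefficient C(2n, n); for a 2x2 grid that is C(4, 2) = 6,
# and for the default 20x20 grid it is 137846528820.
assert solution(2) == 6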
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down (memoized) edit distance between word1 and word2."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
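# --- Usage: the classic pair from the edit-distance literature; turning
# "intention" into "execution" needs 5 single-character edits.
assert min_distance_up_bottom("intention", "execution") == 5
assert min_distance_up_bottom("", "abc") == 3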
def binary_recursive(decimal: int) -> str:
    """Recursively build the binary digits of a non-negative integer."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input and prefix the result with '0b' (and the sign)."""
    number = str(number).strip()
    if not number:
        raise ValueError('No input value was provided')
    negative = '-' if number.startswith('-') else ''
    number = number.lstrip('-')
    if not number.isnumeric():
        raise ValueError('Input value is not an integer')
    return f'{negative}0b{binary_recursive(int(number))}'
if __name__ == "__main__":
from doctest import testmod
testmod()
| 3 | 0 |
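# --- Usage: spot checks against Python's built-in bin().
assert main("11") == bin(11)    # '0b1011'
assert main("-37") == bin(-37)  # '-0b100101'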
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    """Bifid cipher over a 5x5 Polybius square (i and j share a cell)."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-indexed (row, column) coordinates of `letter`."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-indexed (row, column) coordinates."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
| 402 |
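# --- Usage: a round-trip check for the cipher above. encode() strips spaces
# and folds "j" into "i", so the decoded text is the normalized plaintext.
cipher = BifidCipher()
ciphertext = cipher.encode("test message")
assert cipher.decode(ciphertext) == "testmessage"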
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase__ ( __magic_name__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
snake_case__ : Union[str, Any] = f"https://www.amazon.in/laptop/s?k={product}"
snake_case__ : List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
snake_case__ : int = BeautifulSoup(requests.get(__magic_name__ , headers=__magic_name__ ).text )
# Initialize a Pandas dataframe with the column titles
snake_case__ : Optional[Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
snake_case__ : Optional[int] = item.ha.text
snake_case__ : Any = """https://www.amazon.in/""" + item.ha.a["""href"""]
snake_case__ : List[str] = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
snake_case__ : Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
snake_case__ : Optional[int] = """Not available"""
try:
snake_case__ : Tuple = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
snake_case__ : Optional[Any] = """"""
try:
snake_case__ : str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_00 )
except ValueError:
snake_case__ : List[Any] = float("""nan""" )
except AttributeError:
pass
snake_case__ : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case__ : List[Any] = """ """
snake_case__ : Union[str, Any] = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
A_ : int = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 38 | 0 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k: str, patterns) -> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 30 | '''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 30 | 1 |
def is_isogram(string: str) -> bool:
    """Return True if no letter repeats in `string` (case-insensitive)."""
    if not all(x.isalpha() for x in string):
        raise ValueError('String must only contain alphabetic characters.')
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
lowercase_ = input('Enter a string ').strip()
lowercase_ = is_isogram(input_str)
print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 291 |
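# --- Usage: "Uncopyrightable" is a well-known 15-letter isogram, while
# "languages" repeats both "a" and "g".
assert is_isogram("Uncopyrightable")
assert not is_isogram("languages")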
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 291 | 1 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using rolling-hash matching."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
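# --- Note on the implementation above: the explicit substring comparison on a
# hash match guards against collisions, so the result is exact while keeping
# the expected O(len(text) + len(pattern)) running time. Quick spot check:
assert rabin_karp("needle", "haystack with a needle in it")
assert not rabin_karp("needle", "haystack without one")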
| 461 | '''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
lowerCAmelCase_ : Optional[Any] = "bert-base-cased"
lowerCAmelCase_ : Any = "fp16"
lowerCAmelCase_ : Union[str, Any] = "bf16"
lowerCAmelCase_ : List[Any] = [FPaa, BFaa]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
            if policy == "NO_WRAP":
                self.assertIsNone(fsdp_plugin.auto_wrap_policy)
            else:
                self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
| 461 | 1 |
import os
def solution() -> str:
    """Sum the numbers in num.txt and return the first ten digits of the sum."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution()) | 249 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 249 | 1 |
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted list of integers, return indices [i, j] such that
    nums[i] + nums[j] == target, or [] if no such pair exists.
    """
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{two_pointer([2, 7, 11, 15], 9) = }')
| 702 |
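# --- Usage: the scan above assumes the input list is sorted; each iteration
# discards one end of the window, giving O(n) time and O(1) extra space.
assert two_pointer([2, 7, 11, 15], 9) == [0, 1]
assert two_pointer([2, 7, 11, 15], 100) == []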
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Apply the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 309 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1_3_4_8.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep=None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output, sample, generator=None, return_dict=True) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 23 |
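# --- Usage sketch: the predictor-corrector loop this scheduler drives, in the
# shape of diffusers' ScoreSdeVePipeline. `score_model` is a placeholder for a
# UNet-style score network; the resolution and step count are illustrative.
def sample_sketch(score_model, scheduler: ScoreSdeVeScheduler, shape=(1, 3, 256, 256)):
    scheduler.set_timesteps(num_inference_steps=2000)
    scheduler.set_sigmas(num_inference_steps=2000)
    sample = torch.randn(*shape) * scheduler.init_noise_sigma
    for i, t in enumerate(scheduler.timesteps):
        sigma_t = scheduler.sigmas[i] * torch.ones(shape[0])
        # corrector steps (annealed Langevin dynamics) ...
        for _ in range(scheduler.config.correct_steps):
            model_output = score_model(sample, sigma_t).sample
            sample = scheduler.step_correct(model_output, sample).prev_sample
        # ... then one predictor step of the reverse variance-exploding SDE
        model_output = score_model(sample, sigma_t).sample
        output = scheduler.step_pred(model_output, t, sample)
        sample, sample_mean = output.prev_sample, output.prev_sample_mean
    return sample_mean  # the mean of the final step is the denoised result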
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 409 | 0 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _SCREAMING_SNAKE_CASE ( A : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[str] = filter(lambda A : p.requires_grad , model.parameters() )
__snake_case : int = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__A = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE ( A : List[Any] , A : int ) -> Any:
"""simple docstring"""
if metric == "rouge2":
__snake_case : Tuple = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
__snake_case : Tuple = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
__snake_case : Optional[int] = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
__snake_case : Union[str, Any] = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
' function.' )
__snake_case : str = ModelCheckpoint(
dirpath=A , filename=A , monitor=F"""val_{metric}""" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : Tuple ) -> Dict:
"""simple docstring"""
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='min' if 'loss' in metric else 'max' , patience=A , verbose=A , )
class a_ ( pl.Callback ):
def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> str:
"""simple docstring"""
__snake_case : int = {F"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(__a)
    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module) -> None:
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer, pl_module) -> None:
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_train_end(self, trainer, pl_module) -> None:
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid") | 61 |
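A hedged sketch of how these callbacks could be wired into a `pytorch_lightning.Trainer`; `my_module` stands in for whatever `LightningModule` the training script actually defines, and the hyperparameter values are illustrative:

```python
import pytorch_lightning as pl

checkpoint_cb = get_checkpoint_callback(output_dir="output", metric="rouge2")
early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)

trainer = pl.Trainer(
    max_epochs=10,
    callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb],
)
# trainer.fit(my_module)  # my_module: a hypothetical LightningModule
```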
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution()) | 61 | 1 |
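A quick worked check of the predicate: 4150 = 4^5 + 1^5 + 5^5 + 0^5 = 1024 + 1 + 3125 + 0, so 4150 is one of the numbers `solution()` counts:

```python
assert digits_fifth_powers_sum(4150) == 4150  # 1024 + 1 + 3125 + 0
assert digits_fifth_powers_sum(4151) == 4151  # 1024 + 1 + 3125 + 1
assert digits_fifth_powers_sum(4152) != 4152  # 1024 + 1 + 3125 + 32 = 4182
```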
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50_400, n_positions=2_048, n_embd=4_096, n_layer=28, n_head=16, rotary_dim=64,
                 n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0,
                 layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50_256,
                 eos_token_id=50_256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default",
                 patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
                              is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13 | 31 |
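To make the dummy `past_key_values` layout concrete, here is a standalone sketch with GPT-J-like dimensions. The numbers are illustrative, not read from a real config:

```python
import torch

batch, seqlen = 2, 8
n_head, n_embd, n_layer = 16, 4096, 28
past_key_values_length = seqlen + 2  # same "+ 2" offset as above
past_shape = (batch, n_head, past_key_values_length, n_embd // n_head)

# one (key, value) pair of zero tensors per layer
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(n_layer)]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 10, 256])
```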
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 447 | 0 |
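The pattern above defers the heavy `torch`/`tensorflow` imports until an attribute is first accessed. Below is a minimal sketch of the same idea; it is not the actual `transformers._LazyModule` implementation, just an illustration of the mechanism:

```python
import importlib
from types import ModuleType


class LazyModule(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, name: str):
        submodule = self._symbol_to_module.get(name)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        # the real import only happens here, on first access
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, name)
```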
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 151 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Solves the multi-process interleaved print problem by flocking this script's own file."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 151 | 1 |
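For a quick single-node check without `torch.distributed.run`, the same sanity test can also be spawned directly from Python. A sketch, assuming two local GPUs and a free port 29500:

```python
import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp


def worker(rank: int, world_size: int) -> None:
    os.environ.setdefault("MASTER_ADDR", "localhost")
    os.environ.setdefault("MASTER_PORT", "29500")
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    torch.cuda.set_device(rank)
    # same all_reduce + barrier sanity check as the script above
    dist.all_reduce(torch.ones(1).cuda(rank), op=dist.ReduceOp.SUM)
    dist.barrier()
    print(f"rank {rank}/{world_size} is OK")
    dist.destroy_process_group()


if __name__ == "__main__":
    mp.spawn(worker, args=(2,), nprocs=2)
```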
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Fixed-capacity circular queue backed by a doubly linked list of nodes."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None

        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 139 |
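A short usage sketch for the queue above, using the names from the listing. With capacity 3, two enqueues and two dequeues behave as a FIFO:

```python
queue = CircularQueueLinkedList(initial_capacity=3)
queue.enqueue("a")
queue.enqueue("b")
print(queue.first())    # a
print(queue.dequeue())  # a
print(queue.dequeue())  # b
```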
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1: writing n = 6k + r,
    # the remainders r in {0, 2, 3, 4} are divisible by 2 or 3, leaving r in {1, 5}.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
| 139 | 1 |
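A quick check of the generator and the 6k +/- 1 trial division above:

```python
import itertools

print(list(itertools.islice(prime_generator(), 10)))
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert solution(6) == 13  # the 6th prime
```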
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
| 10 | '''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 10 | 1 |
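The comparison above relies on seeded sampling being deterministic: with the generator state fixed, the same noise tensor is produced every run. A minimal sketch of that assumption:

```python
import torch

torch.manual_seed(0)
x = torch.randn(1, 3, 32, 32)
torch.manual_seed(0)
y = torch.randn(1, 3, 32, 32)
# identical seeds produce identical samples, so logits comparisons are stable
assert torch.allclose(x, y)
```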